mlp_openmp.c
/**
 * @file app.c
 * @brief Template for a Host Application Source File.
 */
#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../support/common.h"
#include "../../support/timer.h"
#include "shared.hpp"

T **A;
T *B;
T *C;

// Create input arrays
static void init_data(T **A, T *B, unsigned int m_size, unsigned int n_size) {
  for (unsigned int l = 0; l < NUM_LAYERS; l++)
    for (unsigned int i = 0; i < m_size * n_size; i++) {
      if (i % 100 < 98) {
        A[l][i] = 0;
      } else {
        A[l][i] = (l + i) % 2;
      }
    }
  for (unsigned int i = 0; i < n_size; i++) {
    if (i % 50 < 48) {
      B[i] = 0;
    } else {
      B[i] = i % 2;
    }
  }
}

// Compute output in the host
static void mlp_host(T *C, T **A, T *B, unsigned int m_size, unsigned int n_size) {
  for (unsigned int nl = 0; nl < NUM_LAYERS; nl++) {
    for (unsigned int m = 0; m < m_size; m++) {
      C[m] = 0;
    }
#pragma omp parallel for
    for (unsigned int m = 0; m < m_size; m++) {
      for (unsigned int n = 0; n < n_size; n++) {
        C[m] += A[nl][m * n_size + n] * B[n];
      }
      C[m] = max(0, C[m]);
    }
    for (unsigned int n = 0; n < n_size; n++) {
      B[n] = C[n];
    }
  }
}

static uint64_t mlp_host_sum(uint64_t n_size, uint64_t m_size) {
  uint64_t sum = 0;
  for (uint64_t m = 0; m < n_size; m++) {
    sum += B[m];
  }
  return sum;
}

// Params ---------------------------------------------------------------------
typedef struct Params {
  char *dpu_type;
  int nr_of_ranks;
  int input_size_n;
  int input_size_m;
  int n_warmup;
  int n_reps;
} Params;

void usage() {
  fprintf(stderr,
          "\nUsage: ./program [options]"
          "\n"
          "\nGeneral options:"
          "\n -h        help"
          "\n -d <D>    DPU type (default=fsim)"
          "\n -r <R>    # of ranks (default=1)"
          "\n"
          "\nBenchmark-specific options:"
          "\n -n <N>    input size N (default=512 elements)"
          "\n -m <M>    input size M (default=512 elements)"
          "\n");
}

struct Params input_params(int argc, char **argv) {
  struct Params p;
  p.dpu_type = "fsim";
  p.nr_of_ranks = 1;
  p.input_size_n = 1 << 9;
  p.input_size_m = 1 << 9;
  p.n_warmup = 2;
  p.n_reps = 3;

  int opt;
  while ((opt = getopt(argc, argv, "hd:r:n:m:")) >= 0) {
    switch (opt) {
    case 'h':
      usage();
      exit(0);
      break;
    case 'd':
      p.dpu_type = optarg;
      break;
    case 'r':
      p.nr_of_ranks = atoi(optarg);
      break;
    case 'n':
      p.input_size_n = atoi(optarg);
      break;
    case 'm':
      p.input_size_m = atoi(optarg);
      break;
    default:
      fprintf(stderr, "\nUnrecognized option!\n");
      usage();
      exit(0);
    }
  }
  assert(p.nr_of_ranks > 0 && "Invalid # of ranks!");
  return p;
}

/**
 * @brief Main of the Host Application.
 */
int main(int argc, char **argv) {
  struct Params p = input_params(argc, argv);
  uint64_t n_size = 8192;
  uint64_t m_size = 20480;

  Timer timer;
  A = malloc(NUM_LAYERS * sizeof(T *));
  for (int l = 0; l < NUM_LAYERS; l++)
    A[l] = malloc(n_size * m_size * sizeof(unsigned int));
  B = malloc(m_size * sizeof(unsigned int));
  C = malloc(m_size * sizeof(unsigned int));

  // Create an input file with arbitrary data.
  init_data(A, B, m_size, n_size);

  start(&timer, 0, 1);
  start_region();
  mlp_host(C, A, B, n_size, m_size);
  end_region();
  stop(&timer, 0);

  uint32_t sum = mlp_host_sum(n_size, m_size);

  printf("Kernel ");
  print(&timer, 0, 1);
  printf("\n");
  printf("SUM = %u \n", sum);

  for (int l = 0; l < NUM_LAYERS; l++)
    free(A[l]);
  free(A);
  free(B);
  free(C);

  return 0;
}
DRB071-targetparallelfor-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* use of omp target: len is not mapped. It should be firstprivate within target. */ int main(int argc, char* argv[]) { int i; int len = 1000; int a[len]; for (i=0; i<len; i++) a[i]= i; #pragma omp target map(a[0:len]) #pragma omp parallel for schedule(dynamic) for (i=0;i< len;i++) a[i]=a[i]+1; return 0; }
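The header comment above notes that len is not mapped and is therefore implicitly firstprivate inside the target region, which is exactly why this kernel is race-free. A minimal sketch with those defaults spelled out explicitly (same semantics; tofrom is also the default map type for the array section):

#include <omp.h>

int main(void)
{
    int len = 1000;
    int a[len];

    for (int i = 0; i < len; i++)
        a[i] = i;

    /* Scalars such as len default to firstprivate on a target construct;
       writing it out changes nothing, it only documents the behavior. */
    #pragma omp target map(tofrom: a[0:len]) firstprivate(len)
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < len; i++)
        a[i] = a[i] + 1;

    return 0;
}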
update_ops_matrix_diagonal_multi.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif void multi_qubit_diagonal_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* diagonal_element, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // insert index const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE state_index; #ifdef _OPENMP UINT threshold = 14; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index); } // compute matrix-vector multiply for (ITYPE y = 0; y < matrix_dim; ++y) { state[basis_0 ^ matrix_mask_list[y]] *= diagonal_element[y]; } } #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif free((UINT*)sorted_insert_index_list); free((ITYPE*)matrix_mask_list); } void multi_qubit_control_multi_qubit_diagonal_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* diagonal_element, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // insert index const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count; UINT* sorted_insert_index_list = create_sorted_ui_list_list(target_qubit_index_list, target_qubit_index_count, control_qubit_index_list, control_qubit_index_count); // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop varaibles const ITYPE loop_dim = dim >> (target_qubit_index_count + control_qubit_index_count); ITYPE state_index; #ifdef _OPENMP UINT threshold = 14; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index); } // flip control masks basis_0 ^= control_mask; // compute matrix mul for (ITYPE y = 0; y < matrix_dim; ++y) { state[basis_0 ^ matrix_mask_list[y]] *= diagonal_element[y]; } } #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif free(sorted_insert_index_list); free(matrix_mask_list); }
GB_unaryop__ainv_uint8_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint8_int16 // op(A') function: GB_tran__ainv_uint8_int16 // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint8_t z = (uint8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint8_int16 ( uint8_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint8_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
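For reference, the GB_CAST_OP (p, p) invocation in GB_unop__ainv_uint8_int16 above expands, per this file's #define lines, to the following straight-line code:

{
    int16_t aij = Ax [p] ;          /* GB_GETA: load and type the A entry  */
    uint8_t x = (uint8_t) aij ;     /* GB_CASTING: typecast to the C type  */
    Cx [p] = -x ;                   /* GB_OP written into GB_CX (p)        */
}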
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); 
ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
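The time loop above is commented "serial execution". A minimal OpenMP sketch of the same sweep follows; it relies only on what the code already guarantees, namely that the two time buffers (A[t%2] read, A[(t+1)%2] written) make the spatial iterations of one time step independent. j and k must be privatized because they are declared at function scope:

for (t = 0; t < Nt-1; t++) {
#pragma omp parallel for private(j, k)
  for (i = 1; i < Nz-1; i++) {
    for (j = 1; j < Ny-1; j++) {
      for (k = 1; k < Nx-1; k++) {
        A[(t+1)%2][i][j][k] =
            coef[0][i][j][k] * A[t%2][i  ][j  ][k  ]
          + coef[1][i][j][k] * A[t%2][i-1][j  ][k  ]
          + coef[2][i][j][k] * A[t%2][i  ][j-1][k  ]
          + coef[3][i][j][k] * A[t%2][i  ][j  ][k-1]
          + coef[4][i][j][k] * A[t%2][i+1][j  ][k  ]
          + coef[5][i][j][k] * A[t%2][i  ][j+1][k  ]
          + coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
      }
    }
  }
}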
bwtdt_fmt_plug.c
/* bwtdt s.md5(sha1(md5(s.sha1(p)))) cracker patch for JtR. Hacked together * during August, 2013 by Dhiru Kholia <dhiru at openwall.com> * * This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * bwtdt hash ==> s.md5(sha1(md5(s.sha1(p)))) * * JimF, July 2012. * Made small change in hex_encode 10x improvement in speed. Also some other * changes. Should be a thin dyanamic. * * Apparently, BWTDT stands for "Bad Way To Do This" and was made up just * for the CMIYC 2013 contest. magnum thinks it should be moved to unused/ */ #if FMT_EXTERNS_H extern struct fmt_main fmt_zzz_bwtdt; #elif FMT_REGISTERS_H john_register_one(&fmt_zzz_bwtdt); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "sha.h" #include "md5.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 8 // XXX #endif #define FORMAT_LABEL "bwtdt" #define FORMAT_NAME "bwtdt s.md5(sha1(md5(s.sha1(p))))" #define ALGORITHM_NAME "MD5+SHA1 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 /* change to 0 once there's any speedup for "many salts" */ #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests bwtdt_tests[] = { {"00000000c3a62c15bb2275a52308b73706813634", "password"}, {"b077b5a4140084441ce2f8f3922732f09a34bf9f", "antineoplastic3"}, {"8365ef6da923f658e74c4a851e625f41db7ffe74", "paramedical7"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { unsigned char salt[8]; } *cur_salt; static inline void hex_encode(unsigned char *str, int len, unsigned char *out) { int i; for (i = 0; i < len; ++i) { out[0] = itoa16[str[i]>>4]; out[1] = itoa16[str[i]&0xF]; out += 2; } } static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } // XXX implement me FOR CRYING OUT LOUD! 
static int valid(char *ciphertext, struct fmt_main *self) { return (strlen(ciphertext) == 40); } static void *get_salt(char *ciphertext) { static struct custom_salt cs; unsigned char *out = cs.salt; char *p; p = ciphertext; strncpy((char*)out, p, 8); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = ciphertext + 8; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { // s.md5(sha1(md5(s.sha1(p)))) unsigned char hexhash[40]; unsigned char buf[20]; SHA_CTX sctx; MD5_CTX mctx; SHA1_Init(&sctx); SHA1_Update(&sctx, saved_key[index], strlen(saved_key[index])); SHA1_Final(buf, &sctx); hex_encode(buf, 20, hexhash); MD5_Init(&mctx); MD5_Update(&mctx, cur_salt->salt, 8); MD5_Update(&mctx, hexhash, 40); MD5_Final(buf, &mctx); hex_encode(buf, 16, hexhash); SHA1_Init(&sctx); SHA1_Update(&sctx, hexhash, 32); SHA1_Final(buf, &sctx); hex_encode(buf, 20, hexhash); MD5_Init(&mctx); MD5_Update(&mctx, hexhash, 40); MD5_Final((unsigned char*)crypt_out[index], &mctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (*((ARCH_WORD_32*)binary) == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return *((ARCH_WORD_32*)binary) == crypt_out[index][0]; } static int cmp_exact(char *source, int index) { void *binary = get_binary(source); return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static void bwtdt_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } /* * The zzz is a little protection against Dhiru's vandalism, * it hopefully makes the format come last in auto-detection. 
*/ struct fmt_main fmt_zzz_bwtdt = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif bwtdt_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, bwtdt_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
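As a standalone illustration of the chain that crypt_all() computes, here is a hedged sketch using OpenSSL's one-shot SHA1()/MD5() helpers. It is not part of the JtR source; the salt value and output formatting are assumptions (the stored hash is the 8-character salt followed by the final MD5 digest in hex, 40 characters total, matching valid()):

#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>
#include <openssl/sha.h>

/* hex-encode len bytes into 2*len lowercase hex characters */
static void hex(const unsigned char *in, int len, unsigned char *out)
{
    static const char d[] = "0123456789abcdef";
    for (int i = 0; i < len; i++) {
        *out++ = d[in[i] >> 4];
        *out++ = d[in[i] & 0xF];
    }
}

int main(void)
{
    const unsigned char salt[8] = "00000000"; /* assumed: first 8 chars of the hash */
    const char *p = "password";
    unsigned char buf[20], hexhash[40], msg[48];

    SHA1((const unsigned char *) p, strlen(p), buf); /* sha1(p)          */
    hex(buf, 20, hexhash);
    memcpy(msg, salt, 8);                            /* s . hex(sha1(p)) */
    memcpy(msg + 8, hexhash, 40);
    MD5(msg, 48, buf);                               /* md5(s.sha1(p))   */
    hex(buf, 16, hexhash);
    SHA1(hexhash, 32, buf);                          /* sha1(md5(...))   */
    hex(buf, 20, hexhash);
    MD5(hexhash, 40, buf);                           /* outer md5        */
    hex(buf, 16, hexhash);
    printf("%.8s%.32s\n", (const char *) salt, (const char *) hexhash);
    return 0;
}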
GB_unaryop__abs_int16_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_int64 // op(A') function: GB_tran__abs_int16_int64 // C type: int16_t // A type: int64_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_int64 ( int16_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__lnot_int8_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int8_int16 // op(A') function: GB_tran__lnot_int8_int16 // C type: int8_t // A type: int16_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int8_int16 ( int8_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int8_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB032-truedepfirstdimension-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The outer loop has a loop-carried true dependence. Data race pair: b[i][j]@69:7 vs. b[i-1][j-1]@69:15 */ #include <stdlib.h> int main(int argc, char* argv[]) { int i,j; int len = 1000; if (argc>1) len = atoi(argv[1]); int n=len, m=len; double b[len][len]; #pragma omp parallel for private(i, j) for (i=0; i<n; i++) #pragma omp parallel for private(j) for (j=0; j<m; j++) b[i][j] = 0.5; for (i=1;i<n;i++) #pragma omp parallel for private(j) for (j=1;j<m;j++) b[i][j]=b[i-1][j-1]; for (i=0; i<n; i++) for (j=0; j<m; j++) printf("%lf\n",b[i][j]); return 0; }
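The dependence above can only be defeated by reorganizing the iteration space: each update b[i][j] = b[i-1][j-1] reads the previous element on the same diagonal i - j = const, so the chains run along diagonals and distinct diagonals are independent. A hedged sketch of one race-free rewrite of the second loop nest along those lines (a reorganization for illustration, not part of DataRaceBench):

/* Given double b[n][m] with row 0 and column 0 already initialized. */
void diagonal_sweep(int n, int m, double b[n][m])
{
    /* Diagonals d = i - j over the interior 1 <= i < n, 1 <= j < m;
       each diagonal's chain stays serial, diagonals run in parallel. */
    #pragma omp parallel for
    for (int d = 2 - m; d <= n - 2; d++) {
        int i = (d >= 0) ? d + 1 : 1;
        int j = (d >= 0) ? 1 : 1 - d;
        for (; i < n && j < m; i++, j++)
            b[i][j] = b[i-1][j-1];
    }
}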
threadpool.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <string> #include <vector> #include <functional> #include <memory> #if defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #else #pragma warning(push) #pragma warning(disable : 4267) #endif #include <unsupported/Eigen/CXX11/ThreadPool> #if defined(__GNUC__) #pragma GCC diagnostic pop #else #pragma warning(pop) #endif namespace onnxruntime { namespace concurrency { /** * Generic class for instantiating thread pools. * Don't put any object of this type into a global variable in a Win32 DLL. */ class ThreadPool { public: /* Initializes a thread pool given the current environment. */ ThreadPool(const std::string& name, int num_threads); /* Enqueue a unit of work. */ void Schedule(std::function<void()> fn); /* Schedule work in the interval [0, total). */ void ParallelFor(int32_t total, std::function<void(int32_t)> fn); /* Schedule work in the interval [0, total), with calls split into (num_batches) batches. */ void BatchParallelFor(int32_t total, std::function<void(int32_t)> fn, int32_t num_batches = 0); /* Schedule work in the interval [first, last]. */ void ParallelForRange(int64_t first, int64_t last, std::function<void(int64_t, int64_t)> fn); // This is not supported until the latest Eigen // void SetStealPartitions(const std::vector<std::pair<unsigned, unsigned>>& partitions); /** Tries to call the given function in parallel, with calls split into (num_batches) batches. **/ template <typename F> inline static void TryBatchParallelFor(concurrency::ThreadPool* tp, int32_t total, F&& fn, int32_t num_batches = 0) { if (tp != nullptr) { if (num_batches <= 0) { num_batches = tp->NumThreads() + 1; } tp->BatchParallelFor(total, std::forward<F>(fn), num_batches); } else { #ifdef USE_OPENMP #pragma omp parallel for #endif for (int32_t i = 0; i < total; ++i) { fn(i); } } } /** Tries to call the given function in parallel. **/ template <typename F> inline static void TryParallelFor(concurrency::ThreadPool* tp, int32_t total, F&& fn) { if (tp != nullptr) { tp->ParallelFor(total, std::forward<F>(fn)); } else { #ifdef USE_OPENMP #pragma omp parallel for #endif for (int32_t i = 0; i < total; ++i) { fn(i); } } } int NumThreads() const; int CurrentThreadId() const; Eigen::ThreadPool& GetHandler() { return impl_; } private: Eigen::ThreadPool impl_; }; } // namespace concurrency } // namespace onnxruntime
omp_ztrsm_batch.c
/** * @file omp_ztrsm_batch.c * * @brief BBLAS omp_ztrsm_batch double _Complex routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @precisions normal z -> c d s **/ #endif #include<cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define COMPLEX /** Purpose ------- <b>ztrsm_batch</b> is an OpenMP version of ztrsm_batch. It solves for X in one of the matrix equations op( arrayA[i] )*X = alpha*arrayB[i], or X*op( arrayA[i] ) = alpha[i]*arrayB[i], where op( X ) is one of - op( X ) = X or - op( X ) = X**T or - op( X ) = X**H, alpha[i] is a scalar, X and B are M[i] by N[i] matrices, and arrayA[i] is a unit or non-unit, upper or lower triangular matrix. The solution matrix X overwrites arrayB[i] on exit. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of side[0], uplo[0], transA[0], diag[0], M[0], N[0], alpha[0], lda[0], and ldb[0] are used for all computations. Parameters ---------- @param[in] side Array of <tt>enum BBLAS_SIDE</tt>. Each element side[i] specifies whether op( arrayA[i] ) appears on the left or right side of the operation as follows: - = 'BblasLeft' op( arrayA[i] )*X = alpha[i]*arrayB[i]. - = 'BblasRight' X*op( arrayA[i] ) = alpha[i]*arrayB[i]. @param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. On entry, uplo[i] specifies whether the matrix arrayA[i] is upper or lower triangular as follows: - = 'BblasUpper' arrayA[i] is an upper triangular matrix. - = 'BblasLower' arrayA[i] is a lower triangular matrix. @param[in] transA Array of <tt>enum BBLAS_TRANS</tt>. On entry, trans[i] specifies the form of op( arrayA[i] ) to be used in the operation as follows: - = 'BblasNoTrans' op( arrayA[i] ) = arrayA[i]. - = 'BblasTrans' op( arrayA[i] ) = arrayA[i]**T. - = 'BblasConjTrans' op( arrayA[i] ) = arrayA'[i]**H. @param[in] diag - Array of <tt>enum BBLAS_DIAG</tt>. On entry, diag[i] specifies whether or not arrayA[i] is unit triangular as follows: - = 'BblasUnit' arrayA[i] is assumed to be unit triangular. - = 'BblasNonUnit' arrayA[i] is not assumed to be unit triangular. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix arrayB[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix arrayB[i]. N[i] must be greater than zero. @param[in] alpha Array of COMPLEX_16 When alpha[i] is set to zero arrayA[i] is not referenced and arrayB[i] need not be set before entry. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of dimension lda[i] by Ka[i], where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise. 
When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i] must contain the triangular matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the matrix whilst the strictly upper triangular part is not used. When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must contain the triangular matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the matrix whilst the strictly upper triangular part is not used. Note that when diag = BblasUnit the diagonal elements of arrayA[i] are not used either, they are assumed to be equal to one. @param[in] lda Array of <tt>int</tt>. On entry, lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When side[i] = BblasLeft then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must be at least max( 1, N[i] ). @param[in,out] arrayB Array of pointers. Each element arrayB[i] is a pointer to a COMPLEX_16 matrix of dimension ldb[i] by N[i]. The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements. On exit arrayB[i] is overwritten by the solution matrix X. @param[in] ldb Array of <tt>int</tt>. Each element ldb[i] specifies the first dimension of arrayB[i] as declared in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ). @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. @param[out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith ztrsm in the batch, these need not be set on entry. The error codes can be found in bblas_macros.h.
**/ void omp_ztrsm_batch( const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *transA, const enum BBLAS_DIAG *diag, const int *M, const int *N, const BBLAS_Complex64_t *alpha, const BBLAS_Complex64_t **arrayA, const int *lda, BBLAS_Complex64_t **arrayB, const int *ldb, const int batch_count, enum BBLAS_OPTS batch_opts, int *info) { /*Local variables */ int first_index = 0; int batch_iter; int LDA; char func_name[15] = "ztrsm_batch"; /* Check input arguments */ if (batch_count < 0) { xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1); } if (batch_opts == BBLAS_FIXED) { if ((side[first_index] != BblasLeft) && (side[first_index] != BblasRight)) { xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_SIDE; } return; } if ((uplo[first_index] != BblasUpper) && (uplo[first_index] != BblasLower)) { xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_UPLO; } return; } if ((transA[first_index] != BblasNoTrans) && (transA[first_index] != BblasTrans) && (transA[first_index] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANSA, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_TRANSA; } return; } if ((diag[first_index] != BblasNonUnit) && (diag[first_index] != BblasUnit)) { xerbla_batch(func_name, BBLAS_ERR_DIAG, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_DIAG; } return; } if (M[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_M, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_M; } return; } if (N[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_N; } return; } if (side[first_index] == BblasLeft) { LDA = M[first_index]; } else { LDA = N[first_index]; } if (lda[first_index] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDA; } return; } if (ldb[first_index] < max(1, M[first_index])) { xerbla_batch(func_name, BBLAS_ERR_LDB, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDB; } return; } /* particular case */ if (min(M[first_index], N[first_index]) == 0) { for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_SUCCESS; } return; } #pragma omp parallel for private(batch_iter) for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { /*Call to cblas_ztrsm */ cblas_ztrsm( BblasColMajor, side[first_index], uplo[first_index], transA[first_index], diag[first_index], M[first_index], N[first_index], CBLAS_SADDR(alpha[first_index]), arrayA[batch_iter], lda[first_index], arrayB[batch_iter], ldb[first_index]); /* Successful */ info[batch_iter] = BBLAS_SUCCESS; } /*END FIXED SIZE FOR LOOP */ }else if (batch_opts == BBLAS_VARIABLE) { #pragma omp parallel for private(batch_iter,LDA) for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { /* Check input arguments */ if ((side[batch_iter] != BblasLeft) && (side[batch_iter] != BblasRight)) { xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter); info[batch_iter] = BBLAS_ERR_SIDE; continue; } if ((uplo[batch_iter] != BblasUpper) && (uplo[batch_iter] 
!= BblasLower)) { xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter); info[batch_iter] = BBLAS_ERR_UPLO; continue; } if ((transA[batch_iter] != BblasNoTrans) && (transA[batch_iter] != BblasTrans) && (transA[batch_iter] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANSA, batch_iter); info[batch_iter] = BBLAS_ERR_TRANSA; continue; } if (M[batch_iter] < 0) { xerbla_batch(func_name, BBLAS_ERR_M, batch_iter); info[batch_iter] = BBLAS_ERR_M; continue; } if (N[batch_iter] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, batch_iter); info[batch_iter] = BBLAS_ERR_N; continue; } if (side[batch_iter] == BblasLeft) { LDA = M[batch_iter]; } else { LDA = N[batch_iter]; } if (lda[batch_iter] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter); info[batch_iter] = BBLAS_ERR_LDA; continue; } if (ldb[batch_iter] < max(1, M[batch_iter])) { xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter); info[batch_iter] = BBLAS_ERR_LDB; continue; } /* particular case */ if (min(M[batch_iter], N[batch_iter]) == 0) { info[batch_iter] = BBLAS_SUCCESS; continue; } cblas_ztrsm( BblasColMajor, side[batch_iter], uplo[batch_iter], transA[batch_iter], diag[batch_iter], M[batch_iter], N[batch_iter], CBLAS_SADDR(alpha[batch_iter]), arrayA[batch_iter], lda[batch_iter], arrayB[batch_iter], ldb[batch_iter]); /* Successful */ info[batch_iter] = BBLAS_SUCCESS; } } else { xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1); } } #undef COMPLEX
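A hedged usage sketch for the fixed-batch path documented above; it assumes bblas.h supplies the enums and BBLAS_Complex64_t, and that the batch_count triangular systems (all n-by-n, lower triangular here) are allocated and filled elsewhere. Per the documentation, with BBLAS_FIXED only element 0 of each parameter array is consulted:

#include "bblas.h"

void solve_fixed_batch(const BBLAS_Complex64_t **arrayA,
                       BBLAS_Complex64_t **arrayB,
                       int n, int batch_count, int *info)
{
    enum BBLAS_SIDE  side[1]   = { BblasLeft };
    enum BBLAS_UPLO  uplo[1]   = { BblasLower };
    enum BBLAS_TRANS transA[1] = { BblasNoTrans };
    enum BBLAS_DIAG  diag[1]   = { BblasNonUnit };
    int M[1] = { n }, N[1] = { n }, lda[1] = { n }, ldb[1] = { n };
    BBLAS_Complex64_t alpha[1] = { 1.0 };

    /* BBLAS_FIXED: side[0], uplo[0], ..., ldb[0] are used for every
       problem in the batch; only arrayA/arrayB/info vary per problem. */
    omp_ztrsm_batch(side, uplo, transA, diag, M, N, alpha,
                    arrayA, lda, arrayB, ldb,
                    batch_count, BBLAS_FIXED, info);
}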
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(32*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(32*t3+Nx+28,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),128*t4+126),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
computepi.c
#include <stdio.h>
#include <omp.h>

int main() {
    const int num_steps = 100000;
    double x, sum = 0.0;
    const double step = 1.0 / (double) num_steps;
    int i;

    #pragma omp parallel for reduction(+:sum) private(x)
    for (i = 1; i <= num_steps; i++) {
        x = (double)(i - 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    const double pi = step * sum;
    printf("The value of pi is %f\n", pi);
}
pi3.c
/*
 * This code calculates pi using the formula to calculate
 * the atan(z), which is the integral from 0 to z of 1/(1+x*x)
 * times dx. atan(1) is 45 degrees, or pi/4.
 *
 * Interestingly enough, this version disagrees with the others in
 * the last two decimal places shown. (floating point error
 * accumulation?)
 */
#include <stdio.h>
#include <omp.h>

static long num_steps = 100000; /* number of intervals */
double step;                    /* the size of the interval - dx */

#define NUM_THREADS 2

int main()
{
    int i;                   /* Loop control variable */
    double pi;               /* final result */
    double sum[NUM_THREADS]; /* Maintains partial sum for each thread */

    step = 1.0 / (double) num_steps;

    /*
     * This may be done more flexibly by using an environment
     * variable instead.
     */
    omp_set_num_threads(NUM_THREADS);

    /*
     * Each thread executes the code below.
     */
    #pragma omp parallel
    {
        double x; /* The current x position for function evaluation */
        int id;   /* The identity of the thread */

        id = omp_get_thread_num();
        sum[id] = 0;
        /*
         * We didn't need to make i private because the pragma
         * below does that for us. However, it may cost us some
         * performance, as more threads may be spawned.
         */
        #pragma omp for
        /*
         * Calculate the integral.
         */
        for (i = 0; i < num_steps; i++) {
            x = (i + 0.5) * step;
            sum[id] += 4.0 / (1.0 + x * x);
        }
    }
    /*
     * Multiply by dx.
     */
    for (i = 0, pi = 0.0; i < NUM_THREADS; i++)
        pi += sum[i] * step;
    printf("The computed value of pi is %f\n", pi);
    return 0;
}
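For contrast with the sum[NUM_THREADS] array above: adjacent elements of that array share cache lines, so the per-thread accumulations can ping-pong between cores (false sharing). computepi.c earlier in this collection avoids the issue with reduction(+:sum); a minimal sketch of pi3.c restructured the same way:

#include <stdio.h>
#include <omp.h>

int main(void)
{
    const long num_steps = 100000;
    const double step = 1.0 / (double) num_steps;
    double sum = 0.0;

    /* Each thread accumulates into a private copy of sum; OpenMP
       combines the copies when the loop ends. */
    #pragma omp parallel for reduction(+:sum)
    for (long i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    printf("The computed value of pi is %f\n", sum * step);
    return 0;
}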
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return(channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; double fuzz; const char *artifact; Image *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageDistortion(image,reconstruct_image,metric,distortion, exception); if (status == MagickFalse) return((Image *) NULL); columns=MagickMax(image->columns,reconstruct_image->columns); rows=MagickMax(image->rows,reconstruct_image->rows); SetGeometry(image,&geometry); geometry.width=columns; geometry.height=rows; difference_image=ExtentImage(image,&geometry,exception); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception); highlight_image=CloneImage(image,columns,rows,MagickTrue,exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } status=SetImageStorageClass(highlight_image,DirectClass,exception); if (status == MagickFalse) { difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception); (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception); artifact=GetImageArtifact(image,"highlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception); (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception); artifact=GetImageArtifact(image,"lowlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception); /* Generate difference image. 
*/ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,highlight_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p, *magick_restrict q; register Quantum *magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) || (r == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { SetPixelViaPixelInfo(highlight_image,&lowlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { difference=MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image,&lowlight,r); else SetPixelViaPixelInfo(highlight_image,&highlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,highlight_image,image->compose, MagickTrue,0,0,exception); (void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortion() compares one or more pixel channels of an image to a % reconstructed image and returns the specified distortion metric. % % The format of the GetImageDistortion method is: % % MagickBooleanType GetImageDistortion(const Image *image, % const Image *reconstruct_image,const MetricType metric, % double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. 
% % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickBooleanType difference; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, 
*magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (j=0; j <= MaxPixelChannels; j++) distortion[j]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait 
reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance; channel_distortion[CompositePixelChannel]+=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (j=0; j <= MaxPixelChannels; j++) distortion[j]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q)); distortion[i]+=distance; distortion[CompositePixelChannel]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); 
columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (j=0; j <= MaxPixelChannels; j++) distortion[j]/=((double) columns*rows); distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. 
*/ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=1.0/((double) columns*rows); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ? GetPixelAlpha(image,p) : OpaqueAlpha); Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ? GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i]+=area*QuantumScale*(p[i]- image_statistics[channel].mean)*(GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } else { distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel=GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ GetImageChannels(image)); /* Free resources. 
*/ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if (GetPixelReadMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=20.0*MagickLog10((double) 1.0/sqrt(distortion[i])); return(status); } static MagickBooleanType GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *image_phash, *reconstruct_phash; ssize_t channel; /* Compute perceptual hash in the sRGB 
colorspace. */ image_phash=GetImagePerceptualHash(image,exception); if (image_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; alpha=image_phash[channel].srgb_hu_phash[i]; beta=reconstruct_phash[channel].srgb_hu_phash[i]; difference+=(beta-alpha)*(beta-alpha); } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Compute perceptual hash in the HCLP colorspace. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; alpha=image_phash[channel].hclp_hu_phash[i]; beta=reconstruct_phash[channel].hclp_hu_phash[i]; difference+=(beta-alpha)*(beta-alpha); } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. */ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. 
*/ length=MaxPixelChannels+1; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. % % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. % */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. 
*/
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  I s I m a g e s E q u a l                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImagesEqual() compares the pixels of two images and returns immediately
%  if any pixel is not identical.
%
%  The format of the IsImagesEqual method is:
%
%      MagickBooleanType IsImagesEqual(const Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          break;
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e t I m a g e C o l o r M e t r i c                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorMetric() measures the difference between colors at each pixel
%  location of two images.  It returns MagickTrue if the colors at every
%  pixel location match exactly; otherwise an error measure is computed by
%  summing over all pixels in an image the distance squared in RGB space
%  between each image pixel and its corresponding pixel in the reconstruct
%  image.  The error measure is assigned to these image members:
%
%    o mean_error_per_pixel:  The mean error for any single pixel in
%      the image.
%
%    o normalized_mean_error:  The normalized mean quantization error for
%      any single pixel in the image.  This distance measure is normalized to
%      a range between 0 and 1.  It is independent of the range of red, green,
%      and blue values in the image.
%
%    o normalized_maximum_error:  The normalized maximum quantization
%      error for any single pixel in the image.  This distance measure is
%      normalized to a range between 0 and 1.  It is independent of the range
%      of red, green, and blue values in the image.
%
%  A small normalized mean square error, accessed as
%  image->error.normalized_mean_error, suggests the images are very similar
%  in spatial layout and color.
%
%  The format of the SetImageColorMetric method is:
%
%      MagickBooleanType SetImageColorMetric(Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S i m i l a r i t y I m a g e                                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() searches for the region of the image that most closely
%  resembles the reference image and returns the best match offset.  In
%  addition, it returns a similarity image such that an exact match location
%  is completely white, a location where none of the pixels match is black,
%  and partial matches appear as intermediate gray levels.
%
%  The format of the SimilarityImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        const MetricType metric,const double similarity_threshold,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o metric: the metric.
%
%    o similarity_threshold: minimum distortion for (sub)image match.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  similarity_image=DestroyImage(similarity_image);
  if (status == MagickFalse)
    return(0.0);
  return(distortion);
}

MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      {
        /*
          Braces extend the critical section over the shared best-match
          update; without them only the first statement is protected and
          the write to *similarity_metric races between threads.
        */
        if ((metric == NormalizedCrossCorrelationErrorMetric) ||
            (metric == UndefinedErrorMetric))
          similarity=1.0-similarity;
        if (similarity < *similarity_metric)
          {
            offset->x=x;
            offset->y=y;
            *similarity_metric=similarity;
          }
      }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelReadMask(similarity_image,q) == 0)
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
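/*
  A minimal usage sketch (illustrative only, not part of MagickCore): this is
  how client code typically drives the comparison API defined above.  The
  file names are hypothetical and error handling is abbreviated, so the
  fragment is guarded out of compilation.
*/
#if 0
#include <stdio.h>
#include "MagickCore/MagickCore.h"

int main(int argc, char **argv)
{
  double
    distortion = 0.0;

  ExceptionInfo
    *exception;

  Image
    *difference,
    *image,
    *reconstruct;

  ImageInfo
    *image_info;

  MagickCoreGenesis(argv[0], MagickTrue);
  exception = AcquireExceptionInfo();
  image_info = AcquireImageInfo();
  (void) CopyMagickString(image_info->filename, "original.png",
    MagickPathExtent);
  image = ReadImage(image_info, exception);
  (void) CopyMagickString(image_info->filename, "reconstruct.png",
    MagickPathExtent);
  reconstruct = ReadImage(image_info, exception);
  if ((image != (Image *) NULL) && (reconstruct != (Image *) NULL))
    {
      /* a scalar distortion metric... */
      (void) GetImageDistortion(image, reconstruct,
        RootMeanSquaredErrorMetric, &distortion, exception);
      (void) fprintf(stdout, "RMSE distortion: %g\n", distortion);
      /* ...or a visual difference image */
      difference = CompareImages(image, reconstruct, AbsoluteErrorMetric,
        &distortion, exception);
      if (difference != (Image *) NULL)
        difference = DestroyImage(difference);
    }
  if (reconstruct != (Image *) NULL)
    reconstruct = DestroyImage(reconstruct);
  if (image != (Image *) NULL)
    image = DestroyImage(image);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}
#endif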
ark_heat1D_ompdev.c
/*---------------------------------------------------------------
 * Programmer(s): Shelby Lockhart @ LLNL
 *---------------------------------------------------------------
 * Based on the serial example ark_heat1D.c developed by
 * Daniel R. Reynolds and parallelized with OpenMP 4.5.
 *---------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2019, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 *---------------------------------------------------------------
 * Example problem:
 *
 * The following test simulates a simple 1D heat equation,
 *    u_t = k*u_xx + f
 * for t in [0, 1], x in [0, 1], with initial conditions
 *    u(0,x) = 0
 * Dirichlet boundary conditions, i.e.
 *    u_t(t,0) = u_t(t,1) = 0,
 * and a point-source heating term,
 *    f = 1 for x=0.5.
 *
 * The spatial derivatives are computed using second-order
 * centered differences, with the data distributed over N points
 * on a uniform spatial grid.
 *
 * This program solves the problem with a DIRK method, using a
 * Newton iteration with the SUNLinSol_PCG linear solver and a
 * user-supplied Jacobian-vector product routine.
 *
 * 10 outputs are printed at equal intervals, and run statistics
 * are printed at the end.
 *---------------------------------------------------------------*/

/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h>     /* prototypes for ARKStep fcts., consts. */
#include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */
#include <sunlinsol/sunlinsol_pcg.h>   /* access to PCG SUNLinearSolver */
#include <sundials/sundials_types.h>   /* defs. of realtype, sunindextype, etc */
#include <sundials/sundials_math.h>    /* def. of SUNRsqrt, etc.
*/
#ifdef _OPENMP
#include <omp.h>                       /* OpenMP functions */
#endif

#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif

/* user data structure */
typedef struct {
  sunindextype N;   /* number of intervals   */
  realtype dx;      /* mesh spacing          */
  realtype k;       /* diffusion coefficient */
} *UserData;

/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
               N_Vector fy, void *user_data, N_Vector tmp);

/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);

/* Main Program */
int main() {
  /* general problem parameters */
  realtype T0 = RCONST(0.0);   /* initial time */
  realtype Tf = RCONST(1.0);   /* final time */
  int Nt = 10;                 /* total number of output times */
  realtype rtol = 1.e-6;       /* relative tolerance */
  realtype atol = 1.e-10;      /* absolute tolerance */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 201;        /* spatial mesh size */
  realtype k = 0.5;            /* heat conductivity */
  sunindextype i;

  /* general problem variables */
  int flag;                    /* reusable error-checking flag */
  N_Vector y = NULL;           /* empty vector for storing solution */
  SUNLinearSolver LS = NULL;   /* empty linear solver object */
  void *arkode_mem = NULL;     /* empty ARKStep memory structure */
  FILE *FID, *UFID;
  realtype t, dTout, tout;
  int iout;
  long int nst, nst_a, nfe, nfi, nsetups, nli, nJv, nlcf, nni, ncfn, netf;

  /* allocate and fill udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  udata->N = N;
  udata->k = k;
  udata->dx = RCONST(1.0)/(1.0*N-1.0);   /* mesh spacing */

  /* Initial problem output */
  printf("\n1D Heat PDE test problem:\n");
  printf("  N = %li\n", (long int) udata->N);
  printf("  diffusion coefficient:  k = %"GSYM"\n", udata->k);

  /* Initialize data structures */
  y = N_VNew_OpenMPDEV(N);     /* Create OpenMPDEV vector for solution */
  if (check_flag((void *) y, "N_VNew_OpenMPDEV", 0)) return 1;
  N_VConst(0.0, y);            /* Set initial conditions */

  /* Call ARKStepCreate to initialize the integrator memory and specify the
     right-hand side function in y'=f(t,y), the initial time T0, and the
     initial dependent variable vector y.  Note: since this problem is fully
     implicit, we set f_E to NULL and f_I to f.
*/ arkode_mem = ARKStepCreate(NULL, f, T0, y); if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1; /* Set routines */ flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */ if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1; flag = ARKStepSetMaxNumSteps(arkode_mem, 10000); /* Increase max num steps */ if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1; flag = ARKStepSetPredictorMethod(arkode_mem, 1); /* Specify maximum-order predictor */ if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1; flag = ARKStepSStolerances(arkode_mem, rtol, atol); /* Specify tolerances */ if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1; /* Initialize PCG solver -- no preconditioning, with up to N iterations */ LS = SUNLinSol_PCG(y, 0, N); if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1; /* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */ flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); /* Attach linear solver to ARKStep */ if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1; flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); /* Set the Jacobian routine */ if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1; /* Specify linearly implicit RHS, with non-time-dependent Jacobian */ flag = ARKStepSetLinear(arkode_mem, 0); if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1; /* output mesh to disk */ FID=fopen("heat_mesh.txt","w"); for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i); fclose(FID); /* Open output stream for results, access data array */ UFID=fopen("heat1D.txt","w"); data = N_VGetHostArrayPointer_OpenMPDEV(y); N_VCopyFromDevice_OpenMPDEV(y); /* always copy back from device before printing */ /* output initial condition to disk */ for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM"", data[i]); fprintf(UFID,"\n"); /* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then prints results. Stops when the final time has been reached */ t = T0; dTout = (Tf-T0)/Nt; tout = T0+dTout; printf(" t ||u||_rms\n"); printf(" -------------------------\n"); printf(" %10.6"FSYM" %10.6"FSYM"\n", t, SUNRsqrt(N_VDotProd(y,y)/N)); for (iout=0; iout<Nt; iout++) { flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */ if (check_flag(&flag, "ARKStep", 1)) break; printf(" %10.6"FSYM" %10.6"FSYM"\n", t, SUNRsqrt(N_VDotProd(y,y)/N)); /* print solution stats */ if (flag >= 0) { /* successful solve: update output time */ tout += dTout; tout = (tout > Tf) ? 
Tf : tout; } else { /* unsuccessful solve: break */ fprintf(stderr,"Solver failure, stopping integration\n"); break; } N_VCopyFromDevice_OpenMPDEV(y); /* copy back from device before printing solution */ /* output results to disk */ for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM"", data[i]); fprintf(UFID,"\n"); } printf(" -------------------------\n"); fclose(UFID); /* Print some final statistics */ flag = ARKStepGetNumSteps(arkode_mem, &nst); check_flag(&flag, "ARKStepGetNumSteps", 1); flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a); check_flag(&flag, "ARKStepGetNumStepAttempts", 1); flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi); check_flag(&flag, "ARKStepGetNumRhsEvals", 1); flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups); check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1); flag = ARKStepGetNumErrTestFails(arkode_mem, &netf); check_flag(&flag, "ARKStepGetNumErrTestFails", 1); flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni); check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1); flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn); check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1); flag = ARKStepGetNumLinIters(arkode_mem, &nli); check_flag(&flag, "ARKStepGetNumLinIters", 1); flag = ARKStepGetNumJtimesEvals(arkode_mem, &nJv); check_flag(&flag, "ARKStepGetNumJtimesEvals", 1); flag = ARKStepGetNumLinConvFails(arkode_mem, &nlcf); check_flag(&flag, "ARKStepGetNumLinConvFails", 1); printf("\nFinal Solver Statistics:\n"); printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a); printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi); printf(" Total linear solver setups = %li\n", nsetups); printf(" Total linear iterations = %li\n", nli); printf(" Total number of Jacobian-vector products = %li\n", nJv); printf(" Total number of linear solver convergence failures = %li\n", nlcf); printf(" Total number of Newton iterations = %li\n", nni); printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn); printf(" Total number of error test failures = %li\n", netf); /* Clean up and return with successful completion */ N_VDestroy(y); /* Free vectors */ free(udata); /* Free user data */ ARKStepFree(&arkode_mem); /* Free integrator memory */ SUNLinSolFree(LS); /* Free linear solver */ return 0; } /*-------------------------------- * Functions called by the solver *--------------------------------*/ /* f routine to compute the ODE RHS function f(t,y). 
*/ static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data) { UserData udata = (UserData) user_data; /* access problem data */ sunindextype N = udata->N; /* set variable shortcuts */ realtype k = udata->k; realtype dx = udata->dx; realtype *Y=NULL, *Ydot=NULL; realtype c1, c2; sunindextype i, isource; int dev; dev = omp_get_default_device(); Y = N_VGetDeviceArrayPointer_OpenMPDEV(y); /* access data arrays */ if (check_flag((void *) Y, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; Ydot = N_VGetDeviceArrayPointer_OpenMPDEV(ydot); if (check_flag((void *) Ydot, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; N_VConst(0.0, ydot); /* Initialize ydot to zero */ /* iterate over domain, computing all equations */ c1 = k/dx/dx; c2 = -RCONST(2.0)*k/dx/dx; isource = N/2; #pragma omp target map(to:c1,c2,isource,N,dx) is_device_ptr(Ydot,Y) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i=1; i<N-1; i++) Ydot[i] = c1*Y[i-1] + c2*Y[i] + c1*Y[i+1]; #pragma omp target is_device_ptr(Ydot) device(dev) { Ydot[isource] += 0.01/dx; /* source term */ } return 0; /* Return with success */ } /* Jacobian routine to compute J(t,y) = df/dy. */ static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y, N_Vector fy, void *user_data, N_Vector tmp) { UserData udata = (UserData) user_data; /* variable shortcuts */ sunindextype N = udata->N; realtype k = udata->k; realtype dx = udata->dx; realtype *V=NULL, *JV=NULL; realtype c1, c2; sunindextype i; int dev; dev = omp_get_default_device(); V = N_VGetDeviceArrayPointer_OpenMPDEV(v); /* access data arrays */ if (check_flag((void *) V, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; JV = N_VGetDeviceArrayPointer_OpenMPDEV(Jv); if (check_flag((void *) JV, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; N_VConst(0.0, Jv); /* initialize Jv product to zero */ /* iterate over domain, computing all Jacobian-vector products */ c1 = k/dx/dx; c2 = -RCONST(2.0)*k/dx/dx; #pragma omp target map(to:c1,c2,N) is_device_ptr(JV,V) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i=1; i<N-1; i++) JV[i] = c1*V[i-1] + c2*V[i] + c1*V[i+1]; return 0; /* Return with success */ } /*------------------------------- * Private helper functions *-------------------------------*/ /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns a flag so check if flag >= 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_flag(void *flagvalue, const char *funcname, int opt) { int *errflag; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && flagvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return 1; } /* Check if flag < 0 */ else if (opt == 1) { errflag = (int *) flagvalue; if (*errflag < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n", funcname, *errflag); return 1; }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && flagvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return 1; } return 0; } /*---- end of file ----*/
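/*
  A self-contained sketch (illustrative only, independent of SUNDIALS) of the
  offload pattern used by f() and Jac() above.  The kernels above operate on
  raw device pointers obtained from the N_Vector, hence their is_device_ptr
  clauses; a standalone program that owns its arrays can simply let the
  runtime map them.  Guarded out of compilation.
*/
#if 0
#include <stdio.h>

#define NPTS 201

int main(void)
{
  double u[NPTS], udot[NPTS];
  double dx, c1, c2;
  int i;

  dx = 1.0/(NPTS-1);
  c1 = 1.0/dx/dx;     /* off-diagonal stencil weight (k = 1) */
  c2 = -2.0/dx/dx;    /* diagonal stencil weight */
  for (i=0; i<NPTS; i++) { u[i] = 1.0; udot[i] = 0.0; }

  /* second-order centered difference on the interior, offloaded to the
     default device; boundary entries stay zero as in f() above */
#pragma omp target map(to:u,c1,c2) map(tofrom:udot)
#pragma omp teams distribute parallel for
  for (i=1; i<NPTS-1; i++)
    udot[i] = c1*u[i-1] + c2*u[i] + c1*u[i+1];

  printf("udot at midpoint = %g\n", udot[NPTS/2]);
  return 0;
}
#endif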
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  int volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // 1- are we already in a parallel session?
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Index size = transpose ? rows : cols;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1,size / 32);

  // 3- compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);

  if(threads==1)
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads(); Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockRows = (rows / actual_threads); blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; Index r0 = i*blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index c0 = i*blockCols; Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; info[i].lhs_start = r0; info[i].lhs_length = actualBlockRows; if(transpose) func(c0, actualBlockCols, 0, rows, info); else func(0, rows, c0, actualBlockCols, info); } #endif } } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARALLELIZER_H
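/*
 * Aside (not Eigen code): a standalone sketch of the block-partitioning
 * arithmetic used inside the omp parallel region of parallelize_gemm().
 * Columns are rounded down to a multiple of 4 and rows to a multiple of
 * the register-blocking width mr (assumed to be 8 here purely for
 * illustration); the last thread absorbs the remainder.
 */
#include <stdio.h>

int main(void)
{
  const long rows = 1000, cols = 1000, mr = 8;
  const long actual_threads = 4;

  for (long i = 0; i < actual_threads; i++) {
    long blockCols = (cols / actual_threads) & ~0x3L;      /* multiple of 4  */
    long blockRows = ((rows / actual_threads) / mr) * mr;  /* multiple of mr */

    long r0 = i * blockRows;
    long actualBlockRows = (i + 1 == actual_threads) ? rows - r0 : blockRows;
    long c0 = i * blockCols;
    long actualBlockCols = (i + 1 == actual_threads) ? cols - c0 : blockCols;

    /* e.g. with 4 threads: threads 0-2 get 248 rows/cols, thread 3 gets 256 */
    printf("thread %ld: rows [%ld, %ld), cols [%ld, %ld)\n",
           i, r0, r0 + actualBlockRows, c0, c0 + actualBlockCols);
  }
  return 0;
}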
convolution_sgemm_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack4_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 4u * 4, 4, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator); { int remain_size_start = 0; int nn_size = size / 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 12; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x12 v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0); v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0); v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0); v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0); v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8); v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8); v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra); v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, 
(v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l); v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0); __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0); __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0); img0 += size * 4; tmpptr += 48; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0); img0 += size * 4; tmpptr += 32; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x4 v4f32 _r0 = 
(v4f32)__msa_ld_w(img0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0); img0 += size * 4; tmpptr += 16; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x2 v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0); v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); __msa_st_w((v4i32)_r01_0, tmpptr, 0); __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0); img0 += size * 4; tmpptr += 8; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { v4f32 _val = (v4f32)__msa_ld_w(img0, 0); __msa_st_w((v4i32)_val, tmpptr, 0); img0 += size * 4; tmpptr += 4; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr0 = top_blob.channel(p); int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 v4f32 _sum0 = bias ? 
(v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0); v4f32 _sum1 = _sum0; v4f32 _sum2 = _sum0; v4f32 _sum3 = _sum0; v4f32 _sum4 = _sum0; v4f32 _sum5 = _sum0; v4f32 _sum6 = _sum0; v4f32 _sum7 = _sum0; v4f32 _sum8 = _sum0; v4f32 _sum9 = _sum0; v4f32 _suma = _sum0; v4f32 _sumb = _sum0; for (int j = 0; j < nn; j++) { __builtin_prefetch(tmpptr + 96); __builtin_prefetch(kptr0 + 32); v4i32 _val0123 = __msa_ld_w(tmpptr, 0); v4i32 _val4567 = __msa_ld_w(tmpptr + 4, 0); v4i32 _val89ab = __msa_ld_w(tmpptr + 8, 0); v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0); _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0); _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0); _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0); tmpptr += 12; kptr0 += 4; } __msa_st_w((v4i32)_sum0, outptr0, 0); __msa_st_w((v4i32)_sum1, outptr0 + 4, 0); __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0); __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0); __msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0); __msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0); __msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0); __msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0); __msa_st_w((v4i32)_sum8, outptr0 + 4 * 8, 0); __msa_st_w((v4i32)_sum9, outptr0 + 4 * 9, 0); __msa_st_w((v4i32)_suma, outptr0 + 4 * 10, 0); __msa_st_w((v4i32)_sumb, outptr0 + 4 * 11, 0); outptr0 += 4 * 12; } for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 v4f32 _sum0 = bias ? 
(v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0); v4f32 _sum1 = _sum0; v4f32 _sum2 = _sum0; v4f32 _sum3 = _sum0; v4f32 _sum4 = _sum0; v4f32 _sum5 = _sum0; v4f32 _sum6 = _sum0; v4f32 _sum7 = _sum0; for (int j = 0; j < nn; j++) { __builtin_prefetch(tmpptr + 64); __builtin_prefetch(kptr0 + 32); v4i32 _val0123 = __msa_ld_w(tmpptr, 0); v4i32 _val4567 = __msa_ld_w(tmpptr + 4, 0); v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); tmpptr += 8; kptr0 += 4; } __msa_st_w((v4i32)_sum0, outptr0, 0); __msa_st_w((v4i32)_sum1, outptr0 + 4, 0); __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0); __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0); __msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0); __msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0); __msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0); __msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0); outptr0 += 4 * 8; } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0); v4f32 _sum1 = _sum0; v4f32 _sum2 = _sum0; v4f32 _sum3 = _sum0; for (int j = 0; j < nn; j++) { __builtin_prefetch(tmpptr + 32); __builtin_prefetch(kptr0 + 32); v4i32 _val0123 = __msa_ld_w(tmpptr, 0); v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); tmpptr += 4; kptr0 += 4; } __msa_st_w((v4i32)_sum0, outptr0, 0); __msa_st_w((v4i32)_sum1, outptr0 + 4, 0); __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0); __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0); outptr0 += 4 * 4; } for (; i + 1 < size; i += 2) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0); v4f32 _sum1 = _sum0; for (int j = 0; j < nn; j++) { __builtin_prefetch(tmpptr + 16); __builtin_prefetch(kptr0 + 32); v4f32 _val0 = __msa_fill_w_f32(*tmpptr++); v4f32 _val1 = __msa_fill_w_f32(*tmpptr++); v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0); _sum0 = __msa_fmadd_w(_sum0, _val0, _w0); _sum1 = __msa_fmadd_w(_sum1, _val1, _w0); kptr0 += 4; } __msa_st_w((v4i32)_sum0, outptr0, 0); __msa_st_w((v4i32)_sum1, outptr0 + 4, 0); outptr0 += 4 * 2; } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 4; // inch always > 0 v4f32 _sum = bias ? 
(v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(tmpptr + 8); __builtin_prefetch(kptr0 + 32); v4f32 _val0 = __msa_fill_w_f32(*tmpptr++); v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0); _sum = __msa_fmadd_w(_sum, _val0, _w0); kptr0 += 4; } __msa_st_w((v4i32)_sum, outptr0, 0); outptr0 += 4; } } } static void convolution_im2col_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); float* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * 4; for (int i = 0; i < outh; i++) { int j = 0; for (; j < outw; j++) { v4f32 _val = (v4f32)__msa_ld_w(sptr, 0); __msa_st_w((v4i32)_val, ptr, 0); sptr += stride_w * 4; ptr += 4; } sptr += gap; } } } } } im2col_sgemm_pack4_msa(bottom_im2col, top_blob, kernel, _bias, opt); }
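/*
 * Aside (not ncnn code): the tile bookkeeping above packs columns in tiles
 * of 12/8/4/2/1, one tile per channel of tmp. The expression below maps the
 * leading column index of a tile to its channel, mirroring the
 * tmp.channel(i / 12 + (i % 12) / 8 + ...) arithmetic in
 * im2col_sgemm_pack4_msa(); note it is only evaluated at tile boundaries.
 */
#include <stdio.h>

static int tile_channel(int i)
{
    return i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4
           + (i % 12 % 4) / 2 + i % 12 % 2;
}

int main(void)
{
    const int size = 31; /* decomposes greedily as 12 + 12 + 4 + 2 + 1 */
    int i = 0;
    while (i < size) {
        int rem  = size - i;
        int tile = rem >= 12 ? 12 : rem >= 8 ? 8 : rem >= 4 ? 4 : rem >= 2 ? 2 : 1;
        printf("columns [%2d, %2d) -> tmp channel %d\n", i, i + tile, tile_channel(i));
        i += tile;
    }
    return 0;
}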
MD5_std.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 1996-2001,2003,2006,2011 by Solar Designer * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. * * This implementation of FreeBSD-style MD5-based crypt(3) password hashing * supports passwords of up to 15 characters long only since this lets us use a * significantly faster algorithm. -- SD */ #include <string.h> #include "arch.h" #include "common.h" #include "MD5_std.h" #if MD5_std_mt #include <omp.h> int MD5_std_min_kpc, MD5_std_max_kpc; int MD5_std_nt; MD5_std_combined *MD5_std_all_p = NULL; static char saved_salt[9]; static int salt_changed; #else MD5_std_combined CC_CACHE_ALIGN MD5_std_all; #endif #include "memdbg.h" #if !MD5_IMM static const MD5_data MD5_data_init = { { 0xd76aa477, 0xf8fa0bcc, 0xbcdb4dd9, 0xb18b7a77, 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391 }, { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 }, { 0x77777777, 0x00ff00ff } }; #endif #if !MD5_ASM #define S11 7 #define S12 12 #define S13 17 #define S14 22 #define S21 5 #define S22 9 #define S23 14 #define S24 20 #define S31 4 #define S32 11 #define S33 16 #define S34 23 #define S41 6 #define S42 10 #define S43 15 #define S44 21 #if MD5_IMM /* * Using immediate values is good for CISC. 
*/ #define AC1 0xd76aa477 #define AC2pCd 0xf8fa0bcc #define AC3pCc 0xbcdb4dd9 #define AC4pCb 0xb18b7a77 #define AC5 0xf57c0faf #define AC6 0x4787c62a #define AC7 0xa8304613 #define AC8 0xfd469501 #define AC9 0x698098d8 #define AC10 0x8b44f7af #define AC11 0xffff5bb1 #define AC12 0x895cd7be #define AC13 0x6b901122 #define AC14 0xfd987193 #define AC15 0xa679438e #define AC16 0x49b40821 #define AC17 0xf61e2562 #define AC18 0xc040b340 #define AC19 0x265e5a51 #define AC20 0xe9b6c7aa #define AC21 0xd62f105d #define AC22 0x02441453 #define AC23 0xd8a1e681 #define AC24 0xe7d3fbc8 #define AC25 0x21e1cde6 #define AC26 0xc33707d6 #define AC27 0xf4d50d87 #define AC28 0x455a14ed #define AC29 0xa9e3e905 #define AC30 0xfcefa3f8 #define AC31 0x676f02d9 #define AC32 0x8d2a4c8a #define AC33 0xfffa3942 #define AC34 0x8771f681 #define AC35 0x6d9d6122 #define AC36 0xfde5380c #define AC37 0xa4beea44 #define AC38 0x4bdecfa9 #define AC39 0xf6bb4b60 #define AC40 0xbebfbc70 #define AC41 0x289b7ec6 #define AC42 0xeaa127fa #define AC43 0xd4ef3085 #define AC44 0x04881d05 #define AC45 0xd9d4d039 #define AC46 0xe6db99e5 #define AC47 0x1fa27cf8 #define AC48 0xc4ac5665 #define AC49 0xf4292244 #define AC50 0x432aff97 #define AC51 0xab9423a7 #define AC52 0xfc93a039 #define AC53 0x655b59c3 #define AC54 0x8f0ccc92 #define AC55 0xffeff47d #define AC56 0x85845dd1 #define AC57 0x6fa87e4f #define AC58 0xfe2ce6e0 #define AC59 0xa3014314 #define AC60 0x4e0811a1 #define AC61 0xf7537e82 #define AC62 0xbd3af235 #define AC63 0x2ad7d2bb #define AC64 0xeb86d391 #define Ca 0x67452301 #define Cb 0xefcdab89 #define Cc 0x98badcfe #define Cd 0x10325476 #define MASK1 0x77777777 #define OOFFOOFF 0x00ff00ff #else /* * If we used immediate values on RISC with 32-bit instruction size, it would * take about twice more instructions to load all the values. 
*/ #define MD5_AC MD5_std_all.data.AC #define AC1 MD5_AC[0] #define AC2pCd MD5_AC[1] #define AC3pCc MD5_AC[2] #define AC4pCb MD5_AC[3] #define AC5 MD5_AC[4] #define AC6 MD5_AC[5] #define AC7 MD5_AC[6] #define AC8 MD5_AC[7] #define AC9 MD5_AC[8] #define AC10 MD5_AC[9] #define AC11 MD5_AC[10] #define AC12 MD5_AC[11] #define AC13 MD5_AC[12] #define AC14 MD5_AC[13] #define AC15 MD5_AC[14] #define AC16 MD5_AC[15] #define AC17 MD5_AC[16] #define AC18 MD5_AC[17] #define AC19 MD5_AC[18] #define AC20 MD5_AC[19] #define AC21 MD5_AC[20] #define AC22 MD5_AC[21] #define AC23 MD5_AC[22] #define AC24 MD5_AC[23] #define AC25 MD5_AC[24] #define AC26 MD5_AC[25] #define AC27 MD5_AC[26] #define AC28 MD5_AC[27] #define AC29 MD5_AC[28] #define AC30 MD5_AC[29] #define AC31 MD5_AC[30] #define AC32 MD5_AC[31] #define AC33 MD5_AC[32] #define AC34 MD5_AC[33] #define AC35 MD5_AC[34] #define AC36 MD5_AC[35] #define AC37 MD5_AC[36] #define AC38 MD5_AC[37] #define AC39 MD5_AC[38] #define AC40 MD5_AC[39] #define AC41 MD5_AC[40] #define AC42 MD5_AC[41] #define AC43 MD5_AC[42] #define AC44 MD5_AC[43] #define AC45 MD5_AC[44] #define AC46 MD5_AC[45] #define AC47 MD5_AC[46] #define AC48 MD5_AC[47] #define AC49 MD5_AC[48] #define AC50 MD5_AC[49] #define AC51 MD5_AC[50] #define AC52 MD5_AC[51] #define AC53 MD5_AC[52] #define AC54 MD5_AC[53] #define AC55 MD5_AC[54] #define AC56 MD5_AC[55] #define AC57 MD5_AC[56] #define AC58 MD5_AC[57] #define AC59 MD5_AC[58] #define AC60 MD5_AC[59] #define AC61 MD5_AC[60] #define AC62 MD5_AC[61] #define AC63 MD5_AC[62] #define AC64 MD5_AC[63] #define MD5_IV MD5_std_all.data.IV #define Ca MD5_IV[0] #define Cb MD5_IV[1] #define Cc MD5_IV[2] #define Cd MD5_IV[3] #define MASK1 MD5_std_all.data.masks[0] #define OOFFOOFF MD5_std_all.data.masks[1] #endif /* * F, G, H and I are basic MD5 functions. */ #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) #define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y)))) #define H(x, y, z) (((x) ^ (y)) ^ (z)) #define H2(x, y, z) ((x) ^ ((y) ^ (z))) #define I(x, y, z) ((y) ^ ((x) | ~(z))) /* * ROTATE_LEFT rotates x left n bits. */ #define ROTATE_LEFT(x, n) \ (x) = (((x) << (n)) | ((MD5_word)(x) >> (32 - (n)))) /* * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. * Rotation is separate from addition to prevent recomputation. 
*/ #define FF(a, b, c, d, x, s, ac) \ (a) += F ((b), (c), (d)) + (x) + (ac); \ ROTATE_LEFT ((a), (s)); \ (a) += (b); #define GG(a, b, c, d, x, s, ac) \ (a) += G ((b), (c), (d)) + (x) + (ac); \ ROTATE_LEFT ((a), (s)); \ (a) += (b); #define HH(a, b, c, d, x, s, ac) \ (a) += H ((b), (c), (d)) + (x) + (ac); \ ROTATE_LEFT ((a), (s)); \ (a) += (b); #define HH2(a, b, c, d, x, s, ac) \ (a) += H2 ((b), (c), (d)) + (x) + (ac); \ ROTATE_LEFT ((a), (s)); \ (a) += (b); #define II(a, b, c, d, x, s, ac) \ (a) += I ((b), (c), (d)) + (x) + (ac); \ ROTATE_LEFT ((a), (s)); \ (a) += (b); #endif static const unsigned char PADDING[56] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #if ARCH_LITTLE_ENDIAN #define MD5_swap(dst, src, count) #else #define MD5_swap(dst, src, count) \ { \ MD5_word *dptr = (dst), *sptr = (src); \ int loop_count = (count); \ MD5_word mask = OOFFOOFF; \ do { \ MD5_word tmp = *sptr++; \ ROTATE_LEFT(tmp, 16); \ *dptr++ = ((tmp & mask) << 8) | ((tmp >> 8) & mask); \ tmp = *sptr++; \ ROTATE_LEFT(tmp, 16); \ *dptr++ = ((tmp & mask) << 8) | ((tmp >> 8) & mask); \ } while ((loop_count -= 2)); \ } #endif #define order MD5_std_all._order #define pool MD5_std_all._pool #define block MD5_std_all._block #define prefix MD5_std_all.prefix #define prelen MD5_std_all.prelen void MD5_std_init(struct fmt_main *self) { int index; MD5_pool *current; #if MD5_std_mt int t, n; // Note, $dynamic_n$ will call here for setup. If set are !MD5_IMM, // $dynamic_n$ will NOT be able to use the MD5 functions. // but since I do not know if this function can be called multiple // times, I simply added a static, so the init WILL get run, but // only 1 time. static int bFirst = 1; if (!bFirst) return; bFirst = 0; if (!MD5_std_all_p) { n = omp_get_max_threads(); if (n < 1) n = 1; if (n > MD5_std_mt_max) n = MD5_std_mt_max; MD5_std_min_kpc = n * MD5_N; { int max = n * MD5_std_cpt; while (max > MD5_std_mt_max) max -= n; n = max; } MD5_std_max_kpc = n * MD5_N; /* * The array of MD5_std_all's is not exactly tiny, but we use mem_alloc_tiny() * for its alignment support and error checking. We do not need to free() this * memory anyway. 
*/ MD5_std_all_p = mem_alloc_tiny(n * MD5_std_all_size, MEM_ALIGN_PAGE); MD5_std_nt = n; } #endif for_each_t(MD5_std_nt) { #if !MD5_IMM MD5_std_all.data = MD5_data_init; #endif current = pool; for (index = 0; index < MD5_N; index++) { #define init_line(line, init_even, init_odd) \ order[line][index].even = init_even; \ order[line][index].odd = init_odd; init_line(0, &current->e.p, &current->o.psp); init_line(1, &current->e.spp, &current->o.pp); init_line(2, &current->e.spp, &current->o.psp); init_line(3, &current->e.pp, &current->o.ps); init_line(4, &current->e.spp, &current->o.pp); init_line(5, &current->e.spp, &current->o.psp); init_line(6, &current->e.pp, &current->o.psp); init_line(7, &current->e.sp, &current->o.pp); init_line(8, &current->e.spp, &current->o.psp); init_line(9, &current->e.pp, &current->o.psp); init_line(10, &current->e.spp, &current->o.p); init_line(11, &current->e.spp, &current->o.psp); init_line(12, &current->e.pp, &current->o.psp); init_line(13, &current->e.spp, &current->o.pp); init_line(14, &current->e.sp, &current->o.psp); init_line(15, &current->e.pp, &current->o.psp); init_line(16, &current->e.spp, &current->o.pp); init_line(17, &current->e.spp, &current->o.ps); init_line(18, &current->e.pp, &current->o.psp); init_line(19, &current->e.spp, &current->o.pp); init_line(20, &current->e.spp, &current->o.psp); #undef init_line current++; } } } #if MD5_std_mt static MAYBE_INLINE void MD5_std_set_salt_for_thread(int t, char *salt) #else void MD5_std_set_salt(char *salt) #endif { int length; for (length = 0; length < 8 && salt[length]; length++); memcpy(pool[0].s, salt, pool[0].l.s = length); #if MD5_X2 memcpy(pool[1].s, salt, pool[1].l.s = length); #endif if (salt[8] == MD5_TYPE_STD) { prefix = "$1$"; prelen = 3; } else if (salt[8] == MD5_TYPE_APACHE) { prefix = "$apr1$"; prelen = 6; } else if (salt[8] == MD5_TYPE_AIX) { prefix = ""; prelen = 0; } } #if MD5_std_mt void MD5_std_set_salt(char *salt) { memcpy(saved_salt, salt, sizeof(saved_salt)); salt_changed = 1; } #endif void MD5_std_set_key(char *key, int index) { int length; MD5_pool *current; init_t(); for (length = 0; key[length] && length < 15; length++); current = &pool[index]; memcpy(current->o.p.b, key, current->l.p = length); memcpy(&current->o.p.b[length + 16], PADDING, 40 - length); current->o.p.w[14] = (length + 16) << 3; memcpy(current->o.pp.b, key, length); memcpy(&current->o.pp.b[length], key, length); current->l.pp = length << 1; memcpy(&current->o.pp.b[current->l.pp + 16], PADDING, 40 - current->l.pp); current->o.pp.w[14] = (current->l.pp + 16) << 3; memcpy(&current->e.p.b[16], key, length); memcpy(&current->e.p.b[16 + length], PADDING, 40 - length); current->e.p.w[14] = (length + 16) << 3; MD5_swap(current->e.p.w, current->e.p.w, 14); memcpy(&current->e.pp.b[16], current->o.pp.b, current->l.pp); memcpy(&current->e.pp.b[16 + current->l.pp], PADDING, 40 - current->l.pp); current->e.pp.w[14] = (current->l.pp + 16) << 3; MD5_swap(current->e.pp.w, current->e.pp.w, 14); order[1][index].length = current->l.pp; order[4][index].length = current->l.pp; order[7][index].length = current->l.pp; order[10][index].length = length; order[13][index].length = current->l.pp; order[16][index].length = current->l.pp; order[19][index].length = current->l.pp; } #if MD5_ASM extern void MD5_body(MD5_word x[15], MD5_word out[4]); #else /* * x86-64 implies a fairly recent CPU, so presumably its L1 instruction cache * is large enough. 
*/ #ifdef __x86_64__ #define MAYBE_INLINE_BODY MAYBE_INLINE #else #define MAYBE_INLINE_BODY #endif #if !MD5_X2 #if MD5_std_mt #define MD5_body(x, out) \ MD5_body_for_thread(t, x, out) MAYBE_INLINE_BODY void MD5_body_for_thread(int t, MD5_word x[15], MD5_word out[4]) #else MAYBE_INLINE_BODY void MD5_body(MD5_word x[15], MD5_word out[4]) #endif { MD5_word a, b = Cb, c = Cc, d; /* Round 1 */ a = AC1 + x[0]; ROTATE_LEFT (a, S11); a += b; /* 1 */ d = (c ^ (a & MASK1)) + x[1] + AC2pCd; ROTATE_LEFT (d, S12); d += a; /* 2 */ c = F(d, a, b) + x[2] + AC3pCc; ROTATE_LEFT(c, S13); c += d; /* 3 */ b = F(c, d, a) + x[3] + AC4pCb; ROTATE_LEFT(b, S14); b += c; /* 4 */ FF (a, b, c, d, x[ 4], S11, AC5); /* 5 */ FF (d, a, b, c, x[ 5], S12, AC6); /* 6 */ FF (c, d, a, b, x[ 6], S13, AC7); /* 7 */ FF (b, c, d, a, x[ 7], S14, AC8); /* 8 */ FF (a, b, c, d, x[ 8], S11, AC9); /* 9 */ FF (d, a, b, c, x[ 9], S12, AC10); /* 10 */ FF (c, d, a, b, x[10], S13, AC11); /* 11 */ FF (b, c, d, a, x[11], S14, AC12); /* 12 */ FF (a, b, c, d, x[12], S11, AC13); /* 13 */ FF (d, a, b, c, x[13], S12, AC14); /* 14 */ FF (c, d, a, b, x[14], S13, AC15); /* 15 */ b += F (c, d, a) + AC16; ROTATE_LEFT (b, S14); b += c; /* 16 */ /* Round 2 */ GG (a, b, c, d, x[ 1], S21, AC17); /* 17 */ GG (d, a, b, c, x[ 6], S22, AC18); /* 18 */ GG (c, d, a, b, x[11], S23, AC19); /* 19 */ GG (b, c, d, a, x[ 0], S24, AC20); /* 20 */ GG (a, b, c, d, x[ 5], S21, AC21); /* 21 */ GG (d, a, b, c, x[10], S22, AC22); /* 22 */ c += G (d, a, b) + AC23; ROTATE_LEFT (c, S23); c += d; /* 23 */ GG (b, c, d, a, x[ 4], S24, AC24); /* 24 */ GG (a, b, c, d, x[ 9], S21, AC25); /* 25 */ GG (d, a, b, c, x[14], S22, AC26); /* 26 */ GG (c, d, a, b, x[ 3], S23, AC27); /* 27 */ GG (b, c, d, a, x[ 8], S24, AC28); /* 28 */ GG (a, b, c, d, x[13], S21, AC29); /* 29 */ GG (d, a, b, c, x[ 2], S22, AC30); /* 30 */ GG (c, d, a, b, x[ 7], S23, AC31); /* 31 */ GG (b, c, d, a, x[12], S24, AC32); /* 32 */ /* Round 3 */ HH (a, b, c, d, x[ 5], S31, AC33); /* 33 */ HH2 (d, a, b, c, x[ 8], S32, AC34); /* 34 */ HH (c, d, a, b, x[11], S33, AC35); /* 35 */ HH2 (b, c, d, a, x[14], S34, AC36); /* 36 */ HH (a, b, c, d, x[ 1], S31, AC37); /* 37 */ HH2 (d, a, b, c, x[ 4], S32, AC38); /* 38 */ HH (c, d, a, b, x[ 7], S33, AC39); /* 39 */ HH2 (b, c, d, a, x[10], S34, AC40); /* 40 */ HH (a, b, c, d, x[13], S31, AC41); /* 41 */ HH2 (d, a, b, c, x[ 0], S32, AC42); /* 42 */ HH (c, d, a, b, x[ 3], S33, AC43); /* 43 */ HH2 (b, c, d, a, x[ 6], S34, AC44); /* 44 */ HH (a, b, c, d, x[ 9], S31, AC45); /* 45 */ HH2 (d, a, b, c, x[12], S32, AC46); /* 46 */ c += H (d, a, b) + AC47; ROTATE_LEFT (c, S33); c += d; /* 47 */ HH2 (b, c, d, a, x[ 2], S34, AC48); /* 48 */ /* Round 4 */ II (a, b, c, d, x[ 0], S41, AC49); /* 49 */ II (d, a, b, c, x[ 7], S42, AC50); /* 50 */ II (c, d, a, b, x[14], S43, AC51); /* 51 */ II (b, c, d, a, x[ 5], S44, AC52); /* 52 */ II (a, b, c, d, x[12], S41, AC53); /* 53 */ II (d, a, b, c, x[ 3], S42, AC54); /* 54 */ II (c, d, a, b, x[10], S43, AC55); /* 55 */ II (b, c, d, a, x[ 1], S44, AC56); /* 56 */ II (a, b, c, d, x[ 8], S41, AC57); /* 57 */ d += I (a, b, c) + AC58; ROTATE_LEFT (d, S42); d += a; /* 58 */ II (c, d, a, b, x[ 6], S43, AC59); /* 59 */ II (b, c, d, a, x[13], S44, AC60); /* 60 */ II (a, b, c, d, x[ 4], S41, AC61); /* 61 */ II (d, a, b, c, x[11], S42, AC62); /* 62 */ II (c, d, a, b, x[ 2], S43, AC63); /* 63 */ II (b, c, d, a, x[ 9], S44, AC64); /* 64 */ out[0] = Ca + a; out[1] = Cb + b; out[2] = Cc + c; out[3] = Cd + d; } #else #if MD5_std_mt #define MD5_body(x0, x1, out0, out1) \ 
MD5_body_for_thread(t, x0, x1, out0, out1) MAYBE_INLINE_BODY void MD5_body_for_thread(int t, MD5_word x0[15], MD5_word x1[15], MD5_word out0[4], MD5_word out1[4]) #else MAYBE_INLINE_BODY void MD5_body(MD5_word x0[15], MD5_word x1[15], MD5_word out0[4], MD5_word out1[4]) #endif { MD5_word a0, b0 = Cb, c0 = Cc, d0; MD5_word a1, b1, c1, d1; MD5_word u, v; /* Round 1 */ a0 = (u = AC1) + x0[0]; ROTATE_LEFT (a0, S11); a0 += b0; /* 1 */ a1 = u + x1[0]; ROTATE_LEFT (a1, S11); a1 += b0; /* 1 */ d0 = (c0 ^ (a0 & (u = MASK1))) + x0[1] + (v = AC2pCd); ROTATE_LEFT (d0, S12); d0 += a0; /* 2 */ d1 = (c0 ^ (a1 & u)) + x1[1] + v; ROTATE_LEFT (d1, S12); d1 += a1; /* 2 */ c0 = F(d0, a0, b0) + x0[2] + (u = AC3pCc); ROTATE_LEFT(c0, S13); c0 += d0; /* 3 */ c1 = F(d1, a1, b0) + x1[2] + u; ROTATE_LEFT(c1, S13); c1 += d1; /* 3 */ b0 = F(c0, d0, a0) + x0[3] + (u = AC4pCb); ROTATE_LEFT(b0, S14); b0 += c0; /* 4 */ b1 = F(c1, d1, a1) + x1[3] + u; ROTATE_LEFT(b1, S14); b1 += c1; /* 4 */ FF (a0, b0, c0, d0, x0[ 4], S11, (u = AC5)); /* 5 */ FF (a1, b1, c1, d1, x1[ 4], S11, u); /* 5 */ FF (d0, a0, b0, c0, x0[ 5], S12, (u = AC6)); /* 6 */ FF (d1, a1, b1, c1, x1[ 5], S12, u); /* 6 */ FF (c0, d0, a0, b0, x0[ 6], S13, (u = AC7)); /* 7 */ FF (c1, d1, a1, b1, x1[ 6], S13, u); /* 7 */ FF (b0, c0, d0, a0, x0[ 7], S14, (u = AC8)); /* 8 */ FF (b1, c1, d1, a1, x1[ 7], S14, u); /* 8 */ FF (a0, b0, c0, d0, x0[ 8], S11, (u = AC9)); /* 9 */ FF (a1, b1, c1, d1, x1[ 8], S11, u); /* 9 */ FF (d0, a0, b0, c0, x0[ 9], S12, (u = AC10)); /* 10 */ FF (d1, a1, b1, c1, x1[ 9], S12, u); /* 10 */ FF (c0, d0, a0, b0, x0[10], S13, (u = AC11)); /* 11 */ FF (c1, d1, a1, b1, x1[10], S13, u); /* 11 */ FF (b0, c0, d0, a0, x0[11], S14, (u = AC12)); /* 12 */ FF (b1, c1, d1, a1, x1[11], S14, u); /* 12 */ FF (a0, b0, c0, d0, x0[12], S11, (u = AC13)); /* 13 */ FF (a1, b1, c1, d1, x1[12], S11, u); /* 13 */ FF (d0, a0, b0, c0, x0[13], S12, (u = AC14)); /* 14 */ FF (d1, a1, b1, c1, x1[13], S12, u); /* 14 */ FF (c0, d0, a0, b0, x0[14], S13, (u = AC15)); /* 15 */ FF (c1, d1, a1, b1, x1[14], S13, u); /* 15 */ b0 += F (c0, d0, a0) + (u = AC16); ROTATE_LEFT (b0, S14); b0 += c0; /* 16 */ b1 += F (c1, d1, a1) + u; ROTATE_LEFT (b1, S14); b1 += c1; /* 16 */ /* Round 2 */ GG (a0, b0, c0, d0, x0[ 1], S21, (u = AC17)); /* 17 */ GG (a1, b1, c1, d1, x1[ 1], S21, u); /* 17 */ GG (d0, a0, b0, c0, x0[ 6], S22, (u = AC18)); /* 18 */ GG (d1, a1, b1, c1, x1[ 6], S22, u); /* 18 */ GG (c0, d0, a0, b0, x0[11], S23, (u = AC19)); /* 19 */ GG (c1, d1, a1, b1, x1[11], S23, u); /* 19 */ GG (b0, c0, d0, a0, x0[ 0], S24, (u = AC20)); /* 20 */ GG (b1, c1, d1, a1, x1[ 0], S24, u); /* 20 */ GG (a0, b0, c0, d0, x0[ 5], S21, (u = AC21)); /* 21 */ GG (a1, b1, c1, d1, x1[ 5], S21, u); /* 21 */ GG (d0, a0, b0, c0, x0[10], S22, (u = AC22)); /* 22 */ GG (d1, a1, b1, c1, x1[10], S22, u); /* 22 */ c0 += G (d0, a0, b0) + (u = AC23); ROTATE_LEFT (c0, S23); c0 += d0; /* 23 */ c1 += G (d1, a1, b1) + u; ROTATE_LEFT (c1, S23); c1 += d1; /* 23 */ GG (b0, c0, d0, a0, x0[ 4], S24, (u = AC24)); /* 24 */ GG (b1, c1, d1, a1, x1[ 4], S24, u); /* 24 */ GG (a0, b0, c0, d0, x0[ 9], S21, (u = AC25)); /* 25 */ GG (a1, b1, c1, d1, x1[ 9], S21, u); /* 25 */ GG (d0, a0, b0, c0, x0[14], S22, (u = AC26)); /* 26 */ GG (d1, a1, b1, c1, x1[14], S22, u); /* 26 */ GG (c0, d0, a0, b0, x0[ 3], S23, (u = AC27)); /* 27 */ GG (c1, d1, a1, b1, x1[ 3], S23, u); /* 27 */ GG (b0, c0, d0, a0, x0[ 8], S24, (u = AC28)); /* 28 */ GG (b1, c1, d1, a1, x1[ 8], S24, u); /* 28 */ GG (a0, b0, c0, d0, x0[13], S21, (u = AC29)); /* 29 */ GG (a1, b1, c1, 
d1, x1[13], S21, u); /* 29 */ GG (d0, a0, b0, c0, x0[ 2], S22, (u = AC30)); /* 30 */ GG (d1, a1, b1, c1, x1[ 2], S22, u); /* 30 */ GG (c0, d0, a0, b0, x0[ 7], S23, (u = AC31)); /* 31 */ GG (c1, d1, a1, b1, x1[ 7], S23, u); /* 31 */ GG (b0, c0, d0, a0, x0[12], S24, (u = AC32)); /* 32 */ GG (b1, c1, d1, a1, x1[12], S24, u); /* 32 */ /* Round 3 */ HH (a0, b0, c0, d0, x0[ 5], S31, (u = AC33)); /* 33 */ HH (a1, b1, c1, d1, x1[ 5], S31, u); /* 33 */ HH (d0, a0, b0, c0, x0[ 8], S32, (u = AC34)); /* 34 */ HH (d1, a1, b1, c1, x1[ 8], S32, u); /* 34 */ HH (c0, d0, a0, b0, x0[11], S33, (u = AC35)); /* 35 */ HH (c1, d1, a1, b1, x1[11], S33, u); /* 35 */ HH (b0, c0, d0, a0, x0[14], S34, (u = AC36)); /* 36 */ HH (b1, c1, d1, a1, x1[14], S34, u); /* 36 */ HH (a0, b0, c0, d0, x0[ 1], S31, (u = AC37)); /* 37 */ HH (a1, b1, c1, d1, x1[ 1], S31, u); /* 37 */ HH (d0, a0, b0, c0, x0[ 4], S32, (u = AC38)); /* 38 */ HH (d1, a1, b1, c1, x1[ 4], S32, u); /* 38 */ HH (c0, d0, a0, b0, x0[ 7], S33, (u = AC39)); /* 39 */ HH (c1, d1, a1, b1, x1[ 7], S33, u); /* 39 */ HH (b0, c0, d0, a0, x0[10], S34, (u = AC40)); /* 40 */ HH (b1, c1, d1, a1, x1[10], S34, u); /* 40 */ HH (a0, b0, c0, d0, x0[13], S31, (u = AC41)); /* 41 */ HH (a1, b1, c1, d1, x1[13], S31, u); /* 41 */ HH (d0, a0, b0, c0, x0[ 0], S32, (u = AC42)); /* 42 */ HH (d1, a1, b1, c1, x1[ 0], S32, u); /* 42 */ HH (c0, d0, a0, b0, x0[ 3], S33, (u = AC43)); /* 43 */ HH (c1, d1, a1, b1, x1[ 3], S33, u); /* 43 */ HH (b0, c0, d0, a0, x0[ 6], S34, (u = AC44)); /* 44 */ HH (b1, c1, d1, a1, x1[ 6], S34, u); /* 44 */ HH (a0, b0, c0, d0, x0[ 9], S31, (u = AC45)); /* 45 */ HH (a1, b1, c1, d1, x1[ 9], S31, u); /* 45 */ HH (d0, a0, b0, c0, x0[12], S32, (u = AC46)); /* 46 */ HH (d1, a1, b1, c1, x1[12], S32, u); /* 46 */ c0 += H (d0, a0, b0) + (u = AC47); ROTATE_LEFT (c0, S33); c0 += d0; /* 47 */ c1 += H (d1, a1, b1) + u; ROTATE_LEFT (c1, S33); c1 += d1; /* 47 */ HH (b0, c0, d0, a0, x0[ 2], S34, (u = AC48)); /* 48 */ HH (b1, c1, d1, a1, x1[ 2], S34, u); /* 48 */ /* Round 4 */ II (a0, b0, c0, d0, x0[ 0], S41, (u = AC49)); /* 49 */ II (a1, b1, c1, d1, x1[ 0], S41, u); /* 49 */ II (d0, a0, b0, c0, x0[ 7], S42, (u = AC50)); /* 50 */ II (d1, a1, b1, c1, x1[ 7], S42, u); /* 50 */ II (c0, d0, a0, b0, x0[14], S43, (u = AC51)); /* 51 */ II (c1, d1, a1, b1, x1[14], S43, u); /* 51 */ II (b0, c0, d0, a0, x0[ 5], S44, (u = AC52)); /* 52 */ II (b1, c1, d1, a1, x1[ 5], S44, u); /* 52 */ II (a0, b0, c0, d0, x0[12], S41, (u = AC53)); /* 53 */ II (a1, b1, c1, d1, x1[12], S41, u); /* 53 */ II (d0, a0, b0, c0, x0[ 3], S42, (u = AC54)); /* 54 */ II (d1, a1, b1, c1, x1[ 3], S42, u); /* 54 */ II (c0, d0, a0, b0, x0[10], S43, (u = AC55)); /* 55 */ II (c1, d1, a1, b1, x1[10], S43, u); /* 55 */ II (b0, c0, d0, a0, x0[ 1], S44, (u = AC56)); /* 56 */ II (b1, c1, d1, a1, x1[ 1], S44, u); /* 56 */ II (a0, b0, c0, d0, x0[ 8], S41, (u = AC57)); /* 57 */ II (a1, b1, c1, d1, x1[ 8], S41, u); /* 57 */ d0 += I (a0, b0, c0) + (u = AC58); ROTATE_LEFT (d0, S42); d0 += a0; /* 58 */ d1 += I (a1, b1, c1) + u; ROTATE_LEFT (d1, S42); d1 += a1; /* 58 */ II (c0, d0, a0, b0, x0[ 6], S43, (u = AC59)); /* 59 */ II (c1, d1, a1, b1, x1[ 6], S43, u); /* 59 */ II (b0, c0, d0, a0, x0[13], S44, (u = AC60)); /* 60 */ II (b1, c1, d1, a1, x1[13], S44, u); /* 60 */ II (a0, b0, c0, d0, x0[ 4], S41, (u = AC61)); /* 61 */ II (a1, b1, c1, d1, x1[ 4], S41, u); /* 61 */ II (d0, a0, b0, c0, x0[11], S42, (u = AC62)); /* 62 */ II (d1, a1, b1, c1, x1[11], S42, u); /* 62 */ II (c0, d0, a0, b0, x0[ 2], S43, (u = AC63)); /* 63 */ II (c1, d1, a1, b1, 
x1[ 2], S43, u); /* 63 */ II (b0, c0, d0, a0, x0[ 9], S44, (u = AC64)); /* 64 */ II (b1, c1, d1, a1, x1[ 9], S44, u); /* 64 */ out1[3] = Cd + d1; out0[0] = Ca + a0; out0[1] = Cb + b0; out0[2] = Cc + c0; out0[3] = Cd + d0; out1[0] = Ca + a1; out1[1] = Cb + b1; out1[2] = Cc + c1; } #endif #endif #if MD5_std_mt static MAYBE_INLINE void MD5_std_crypt_for_thread(int t) #else void MD5_std_crypt(int count) #endif { int length, index, mask; MD5_pattern *line; #if ARCH_LITTLE_ENDIAN MD5_word *last0; #endif #if MD5_X2 MD5_pool *key; #if ARCH_LITTLE_ENDIAN MD5_word *last1; #endif #endif #if MD5_X2 for (index = 0, key = pool; index < MD5_N; index++, key++) { #else #define index 0 #define key pool #endif memcpy(key->o.ps.b, key->o.p.b, key->l.p); memcpy(&key->o.ps.b[key->l.p], key->s, key->l.s); key->l.ps = key->l.p + key->l.s; memcpy(&key->o.ps.b[key->l.ps + 16], PADDING, 40 - key->l.ps); key->o.ps.w[14] = (key->l.ps + 16) << 3; memcpy(key->o.psp.b, key->o.ps.b, key->l.ps); memcpy(&key->o.psp.b[key->l.ps], key->o.p.b, key->l.p); key->l.psp = key->l.ps + key->l.p; memcpy(&key->o.psp.b[key->l.psp + 16], PADDING, 40 - key->l.psp); key->o.psp.w[14] = (key->l.psp + 16) << 3; memcpy(&key->e.sp.b[16], key->s, key->l.s); memcpy(&key->e.sp.b[16 + key->l.s], key->o.p.b, key->l.p); memcpy(&key->e.sp.b[16 + key->l.ps], PADDING, 40 - key->l.ps); key->e.sp.w[14] = (key->l.ps + 16) << 3; MD5_swap(key->e.sp.w, key->e.sp.w, 14); memcpy(&key->e.spp.b[16], key->s, key->l.s); memcpy(&key->e.spp.b[16 + key->l.s], key->o.pp.b, key->l.pp); memcpy(&key->e.spp.b[16 + key->l.psp], PADDING, 40 - key->l.psp); key->e.spp.w[14] = (key->l.psp + 16) << 3; MD5_swap(key->e.spp.w, key->e.spp.w, 14); order[0][index].length = key->l.psp; order[2][index].length = key->l.psp; order[3][index].length = key->l.ps; order[5][index].length = key->l.psp; order[6][index].length = key->l.psp; order[8][index].length = key->l.psp; order[9][index].length = key->l.psp; order[11][index].length = key->l.psp; order[12][index].length = key->l.psp; order[14][index].length = key->l.psp; order[15][index].length = key->l.psp; order[17][index].length = key->l.ps; order[18][index].length = key->l.psp; order[20][index].length = key->l.psp; memcpy(&block[index], key->o.psp.b, key->l.psp); memcpy(&block[index].b[key->l.psp], PADDING, 56 - key->l.psp); block[index].w[14] = key->l.psp << 3; MD5_swap(block[index].w, block[index].w, 14); #if MD5_X2 } MD5_body(block[0].w, block[1].w, MD5_out[0], MD5_out[1]); MD5_swap(MD5_out[0], MD5_out[0], 8); #else MD5_body(block[0].w, MD5_out[0]); MD5_swap(MD5_out[0], MD5_out[0], 4); #endif #if MD5_X2 for (index = 0, key = pool; index < MD5_N; index++, key++) { #endif memcpy(&block[index], key->o.p.b, key->l.p); memcpy(&block[index].b[key->l.p], prefix, prelen); memcpy(&block[index].b[key->l.p + prelen], key->s, key->l.s); memcpy(&block[index].b[key->l.ps + prelen], MD5_out[index], key->l.p); length = key->l.psp + prelen; if ((mask = key->l.p)) do { block[index].b[length++] = (mask & 1) ? 
0 : key->o.p.b[0]; } while (mask >>= 1); memcpy(&block[index].b[length], PADDING, 56 - length); block[index].w[14] = length << 3; MD5_swap(block[index].w, block[index].w, 14); #if MD5_X2 } #else #undef index #undef key #endif #if MD5_X2 MD5_body(block[0].w, block[1].w, order[0][0].even->w, order[0][1].even->w); #else MD5_body(block[0].w, order[0][0].even->w); #endif index = 500; line = order[0]; do { #if ARCH_LITTLE_ENDIAN #if ARCH_ALLOWS_UNALIGNED #if MD5_X2 MD5_body(line[0].even->w, line[1].even->w, (MD5_word *)&line[0].odd->b[line[0].length], (MD5_word *)&line[1].odd->b[line[1].length]); #else MD5_body(line[0].even->w, (MD5_word *)&line[0].odd->b[line[0].length]); #endif #else #if MD5_X2 MD5_body(line[0].even->w, line[1].even->w, MD5_out[0], MD5_out[1]); memcpy(&line[0].odd->b[line[0].length], MD5_out[0], 16); memcpy(&line[1].odd->b[line[1].length], MD5_out[1], 16); #else if (((ARCH_WORD)&line[0].odd->b[line[0].length]) & 3) { MD5_body(line[0].even->w, MD5_out[0]); memcpy(&line[0].odd->b[line[0].length], MD5_out[0], 16); } else { MD5_body(line[0].even->w, (MD5_word *)&line[0].odd->b[line[0].length]); } #endif #endif last0 = line[0].odd->w; #if MD5_X2 last1 = line[1].odd->w; if ((line += 2) > &order[20][MD5_N - 1]) line = order[0]; MD5_body(last0, last1, line[0].even->w, line[1].even->w); #else if (++line > &order[20][0]) line = order[0]; MD5_body(last0, line[0].even->w); #endif #else #if MD5_X2 MD5_body(line[0].even->w, line[1].even->w, MD5_out[0], MD5_out[1]); MD5_swap(MD5_out[0], MD5_out[0], 8); #else MD5_body(line[0].even->w, MD5_out[0]); MD5_swap(MD5_out[0], MD5_out[0], 4); #endif memcpy(&line[0].odd->b[line[0].length], MD5_out[0], 16); #if MD5_X2 memcpy(&line[1].odd->b[line[1].length], MD5_out[1], 16); #endif MD5_swap(block[0].w, line[0].odd->w, 14); block[0].w[14] = line[0].odd->w[14]; #if MD5_X2 MD5_swap(block[1].w, line[1].odd->w, 14); block[1].w[14] = line[1].odd->w[14]; if ((line += 2) > &order[20][MD5_N - 1]) line = order[0]; MD5_body(block[0].w, block[1].w, line[0].even->w, line[1].even->w); #else if (++line > &order[20][0]) line = order[0]; MD5_body(block[0].w, line[0].even->w); #endif #endif } while (--index); memcpy(MD5_out[0], line[0].even, 16); #if MD5_X2 memcpy(MD5_out[1], line[1].even, 16); #endif } #if MD5_std_mt void MD5_std_crypt(int count) { #if MD5_std_mt int t, n = (count + (MD5_N - 1)) / MD5_N; #endif #ifdef _OPENMP #pragma omp parallel for default(none) private(t) shared(n, salt_changed, saved_salt) #endif for_each_t(n) { /* * We could move the salt_changed check out of the parallel region (and have * two specialized parallel regions instead), but MD5_std_crypt_for_thread() * does so much work that the salt_changed check is negligible. 
*/ if (salt_changed) MD5_std_set_salt_for_thread(t, saved_salt); MD5_std_crypt_for_thread(t); } salt_changed = 0; } #endif char *MD5_std_get_salt(char *ciphertext) { static char out[9]; char *p, *q; int i; if (!strncmp(ciphertext, "$apr1$", 6)) { out[8] = MD5_TYPE_APACHE; p = ciphertext + 6; } else if (!strncmp(ciphertext, "{smd5}", 6)) { out[8] = MD5_TYPE_AIX; p = ciphertext + 6; } else { out[8] = MD5_TYPE_STD; p = ciphertext + 3; } q = out; for (i = 0; *p != '$' && i < 8; i++) *q++ = *p++; while (i++ < 8) *q++ = 0; return out; } #define TO_BINARY(b1, b2, b3) \ value = \ (MD5_word)atoi64[ARCH_INDEX(pos[0])] | \ ((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out.b[b1] = value >> 16; \ out.b[b2] = value >> 8; \ out.b[b3] = value; MD5_word *MD5_std_get_binary(char *ciphertext) { static union { MD5_binary w; char b[16]; } out; char *pos; MD5_word value; pos = ciphertext + 3; if (!strncmp(ciphertext, "$apr1$", 6) || !strncmp(ciphertext, "{smd5}", 6)) pos = ciphertext + 6; while (*pos++ != '$'); TO_BINARY(0, 6, 12); TO_BINARY(1, 7, 13); TO_BINARY(2, 8, 14); TO_BINARY(3, 9, 15); TO_BINARY(4, 10, 5); out.b[11] = (MD5_word)atoi64[ARCH_INDEX(pos[0])] | ((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6); #undef OOFFOOFF #define OOFFOOFF 0x00ff00ff MD5_swap(out.w, out.w, 4); #undef OOFFOOFF return out.w; }
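/*
 * Aside (independent of the JtR internals above): a sketch of the crypt(3)
 * base-64 decoding performed by the TO_BINARY macro. Each group of four
 * characters packs 24 bits, first character in the low bits, which
 * TO_BINARY then scatters into three bytes of the binary hash. The
 * ./0-9A-Za-z alphabet below is the conventional crypt(3) one and is an
 * assumption here, not taken from this file.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static const char itoa64[] =
    "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

/* invalid characters map to 0 in this sketch; the real table rejects them */
static uint32_t atoi64(char c)
{
    const char *p = strchr(itoa64, c);
    return p ? (uint32_t)(p - itoa64) : 0;
}

int main(void)
{
    const char *pos = "VQqZ"; /* an arbitrary 4-character group */
    uint32_t value = atoi64(pos[0])
                   | (atoi64(pos[1]) << 6)
                   | (atoi64(pos[2]) << 12)
                   | (atoi64(pos[3]) << 18);

    /* TO_BINARY(b1, b2, b3) stores these three bytes at indices b1, b2, b3 */
    printf("bytes: %02x %02x %02x\n",
           (value >> 16) & 0xff, (value >> 8) & 0xff, value & 0xff);
    return 0;
}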
example_08-StructOfArrays-CellLinkedList-OuterOmp.c
/* * SPDX-License-Identifier: BSD-3-Clause * * example_08-StructOfArrays-CellLinkedList-OuterOmp.c : * Example of SPH Density Calculation using * fast neighbor search the main density loop via * Cell Linked List method, Struct of Arrays (SoA) * data layout, OpenMP parallelization at the * cell level, SIMD directives in the kernel * and in the inner-most loop. * * (C) Copyright 2021 José Hugo Elsas * Author: José Hugo Elsas <jhelsas@gmail.com> * * Command Line Options: * -runs <int> : Set the number of repetitions (runs) for * calculating the density. The value of * the density is based on the last * iteration. * Default value: 1 * -run_seed <int>: Flag to set an alternative seed use for * for the PRNG. Instead of feeding seed * to the PRNG directly, it feeds * seed + iteration, as to generate different * configurations for each iteration. * Default value: 0 - (possible 0/1) * -seed <int>: Set the seed to use for the SPH particles * uniform position generation in the box * Default value: 123123123 * * -N <int>: Set the number of SPH particles to be used * Default value: 1e5 = 100,000 * -h <float>: Set the value of the smoothing kernel * parameter h, which corresponds to half * of the support of the kernel. * Default value: 0.05 * * -Nx <int>: Set the number of Cells in the X direction * Default value: 10 * -Ny <int>: Set the number of Cells in the Y direction * Default value: 10 * -Nz <int>: Set the number of Cells in the Z direction * Default value: 10 * * -Xmin <float>: Set the lower bound in the X direction for * the Cell Linked List box * Default value: 0.0 * -Ymin <float>: Set the lower bound in the Y direction for * the Cell Linked List box * Default value: 0.0 * -Ymin <float>: Set the lower bound in the Z direction for * the Cell Linked List box * Default value: 0.0 * * -Xmax <float>: Set the lower bound in the X direction for * the Cell Linked List box * Default value: 1.0 * -Ymax <float>: Set the lower bound in the Y direction for * the Cell Linked List box * Default value: 1.0 * -Zmax <float>: Set the lower bound in the Z direction for * the Cell Linked List box * Default value: 1.0 */ #include <math.h> #include <ctype.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <limits.h> #include <unistd.h> #include <stdbool.h> #include <sys/time.h> #include <inttypes.h> #include <omp.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_heapsort.h> #include "sph_data_types.h" #include "sph_linked_list.h" #include "sph_utils.h" #ifndef M_PI #define M_PI (3.14159265358979323846) #endif #define COMPUTE_BLOCKS 5 int main_loop(int run, bool run_seed, int64_t N, double h, long int seed, void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times); int compute_density_3d_outerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box); int compute_density_3d_chunk_noomp(int64_t node_begin, int64_t node_end, int64_t nb_begin, int64_t nb_end,double h, double* restrict x, double* restrict y, double* restrict z, double* restrict nu, double* restrict rho); int count_box_pairs(linkedListBox *box); int setup_box_pairs(linkedListBox *box, int64_t *node_begin,int64_t *node_end, int64_t *nb_begin,int64_t *nb_end); double w_bspline_3d_constant(double h); #pragma omp declare simd double w_bspline_3d_simd(double q); int main(int argc, char **argv){ bool run_seed = false; // By default the behavior is is to use the same seed int runs = 1,err; // it only runs once long int seed = 123123123; // The default seed is 123123123 int64_t N = 
100000; // The default number of particles is N = 1e5 = 100,000 double h=0.05; // The default kernel smoothing length is h = 0.05 linkedListBox *box; // Uninitialized Box containing the cells for the cell linked list method SPHparticle *lsph; // Uninitialized array of SPH particles box = (linkedListBox*)malloc(1*sizeof(linkedListBox)); // Create a box representing the entire 3d domain // allow for command line customization of the run arg_parse(argc,argv,&N,&h,&seed,&runs,&run_seed,box); // Parse the command line options // line arguments and override default values err = SPHparticle_SoA_malloc(N,&lsph); if(err) fprintf(stderr,"error in SPHparticle_SoA_malloc\n"); void *swap_arr = malloc(N*sizeof(double)); double times[runs*COMPUTE_BLOCKS]; for(int run=0;run<runs;run+=1) main_loop(run,run_seed,N,h,seed,swap_arr,box,lsph,times); bool is_cll = true; const char *prefix = "ex08,cll,SoA,outer,simd"; print_time_stats(prefix,is_cll,N,h,seed,runs,lsph,box,times); print_sph_particles_density(prefix,is_cll,N,h,seed,runs,lsph,box); SPHparticleSOA_safe_free(N,&lsph); safe_free_box(box); free(swap_arr); return 0; } /* * Function main_loop: * Runs the main loop of the program, including the particle array generation, * density calculation and the timings annotations. * * Arguments: * run <int> : index (or value) or the present iteration * run_seed <bool> : boolean defining whether to use run index for seed or not * N <int> : Number of SPH particles to be used in the run * h <double> : Smoothing Length for the Smoothing Kernel w_bspline * seed <long int> : seed for GSL PRNG generator to generate particle positions * box <linkedListBox> : Box of linked list cells, encapsulating the 3d domain * lsph <SPHparticle> : Array (pointer) of SPH particles to be updated * times <double> : Array to store the computation timings to be updated * Returns: * 0 : error code returned * lsph <SPHparticle> : SPH particle array is updated in the rho field by reference * times <double> : Times is updated by reference */ int main_loop(int run, bool run_seed, int64_t N, double h, long int seed, void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times) { int err; if(run_seed) err = gen_unif_rdn_pos_box(N,seed+run,box,lsph); else err = gen_unif_rdn_pos_box(N,seed,box,lsph); if(err) fprintf(stderr,"error in gen_unif_rdn_pos\n"); // ------------------------------------------------------ // double t0,t1,t2,t3,t4,t5; t0 = omp_get_wtime(); err = compute_hash_MC3D(N,lsph,box); // Compute Morton Z 3D hash based on the if(err) // cell index for each of the X, Y and Z fprintf(stderr,"error in compute_hash_MC3D\n"); // directions, in which a given particle reside t1 = omp_get_wtime(); qsort(lsph->hash,N,2*sizeof(int64_t),compare_int64_t); // Sort the Particle Hash Hashes, getting the shuffled // index necessary to re-shuffle the remaining arrays t2 = omp_get_wtime(); err = reorder_lsph_SoA(N,lsph,swap_arr); // Reorder all arrays according to the sorted hash, if(err) // As to have a quick way to retrieve a cell fprintf(stderr,"error in reorder_lsph_SoA\n"); // given its hash. 
/*
 *  Function compute_density_3d_outerOmp:
 *    Computes the SPH density from the particles using the cell linked list,
 *    with vectorization at the compute_density_3d_chunk_noomp level, but with
 *    the parallelization done at the outer-most loop of this function,
 *    not at the chunk level.
 *
 *    Arguments:
 *       N <int>              : Number of SPH particles to be used in the run
 *       h <double>           : Smoothing Length for the Smoothing Kernel w_bspline
 *       lsph <SPHparticle>   : Array (pointer) of SPH particles to be updated
 *       box <linkedListBox>  : Box of linked list cells, encapsulating the 3d domain
 *    Returns:
 *       0                    : error code returned
 *       lsph <SPHparticle>   : SPH particle array is updated in the rho field by reference
 */
int compute_density_3d_outerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box){

  memset(lsph->rho,(int)0,N*sizeof(double));                                    // Pre-initialize the density to zero

  #pragma omp parallel for                                                      // Execute the iterations in parallel
  for (khint32_t kbegin = kh_begin(box->hbegin); kbegin != kh_end(box->hbegin); kbegin++){ // Iterate over each receiver cell begin index
    int64_t node_hash=-1,node_begin=0, node_end=0;                              // Initialize the node indexes on the array
    int64_t nb_begin= 0, nb_end = 0;                                            // Initialize the neighbor indexes
    int64_t nblist[(2*box->width+1)*(2*box->width+1)*(2*box->width+1)];         // Prepare a list of potential neighbor hashes

    if (kh_exist(box->hbegin, kbegin)){                                         // Verify that the given iterator actually exists
      khint32_t kend = kh_get(1, box->hend, kh_key(box->hbegin, kbegin));       // Then get the end of the receiver cell iterator
      node_hash  = kh_key(box->hbegin, kbegin);                                 // Then get the hash corresponding to it
      node_begin = kh_value(box->hbegin, kbegin);                               // Get the receiver cell begin index in the array
      node_end   = kh_value(box->hend, kend);                                   // Get the receiver cell end index in the array

      neighbour_hash_3d(node_hash,nblist,box->width,box);                       // Then find the hashes of its neighbors
      for(int j=0;j<(2*box->width+1)*(2*box->width+1)*(2*box->width+1);j+=1){   // and then iterate over them
        if(nblist[j]>=0){                                                       // If a given neighbor actually has particles
          nb_begin = kh_value(box->hbegin, kh_get(0, box->hbegin, nblist[j]) ); // then get the contributing cell begin index
          nb_end   = kh_value(box->hend  , kh_get(1, box->hend  , nblist[j]) ); // and get the contributing cell end index

          compute_density_3d_chunk_noomp(node_begin,node_end,nb_begin,nb_end,h,       // and compute the density contribution from
                                         lsph->x,lsph->y,lsph->z,lsph->nu,lsph->rho); // the contributing cell to the receiver cell
        }
      }
    }
  }

  return 0;
}
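/*
 * Side note (a sketch under assumptions, not used by this example): the
 * count_box_pairs()/setup_box_pairs() declarations above suggest an
 * alternative organization in which all (receiver, contributor) cell pairs
 * are enumerated up front and then traversed in one flat loop. The sketch
 * assumes setup_box_pairs fills the four index arrays with one entry per
 * pair, as its signature indicates; its definition lives elsewhere. Note
 * that naively parallelizing this flat loop would race on rho, since
 * several pairs share the same receiver cell, so the sketch stays serial.
 */
static int compute_density_3d_pairs_sketch(int N, double h, SPHparticle *lsph,
                                           linkedListBox *box){
  int max_pairs = count_box_pairs(box);                         // assumed upper bound on the pair count
  int64_t *idx = (int64_t*)malloc(4*max_pairs*sizeof(int64_t)); // one block backing all four arrays
  if(idx == NULL)
    return 1;
  int64_t *node_begin = idx + 0*max_pairs, *node_end = idx + 1*max_pairs;
  int64_t *nb_begin   = idx + 2*max_pairs, *nb_end   = idx + 3*max_pairs;

  int n_pairs = setup_box_pairs(box,node_begin,node_end,nb_begin,nb_end);

  memset(lsph->rho,(int)0,N*sizeof(double));                    // densities start at zero
  for(int p=0;p<n_pairs;p+=1)                                   // serial traversal of the pair list
    compute_density_3d_chunk_noomp(node_begin[p],node_end[p],nb_begin[p],nb_end[p],
                                   h,lsph->x,lsph->y,lsph->z,lsph->nu,lsph->rho);

  free(idx);
  return 0;
}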
/*
 *  Function compute_density_3d_chunk_noomp:
 *    Computes the SPH density contribution for a pair of cells, from the nb_
 *    indexes to the node_ indexes. No thread-level parallelization is
 *    performed; vectorization is applied in the inner loop.
 *
 *    Arguments:
 *       node_begin <int>     : Begin index of the receiver cell
 *       node_end <int>       : End index of the receiver cell
 *       nb_begin <int>       : Begin index of the sender (neighbor) cell
 *       nb_end <int>         : End index of the sender (neighbor) cell
 *       h <double>           : Smoothing Length for the Smoothing Kernel w_bspline
 *       x <double*>          : Array of particles' X positions
 *       y <double*>          : Array of particles' Y positions
 *       z <double*>          : Array of particles' Z positions
 *       nu <double*>         : Array of particles' density weights (i.e. masses)
 *    Returns:
 *       0                    : error code returned
 *       rho <double*>        : Array of particles' densities
 */
int compute_density_3d_chunk_noomp(int64_t node_begin, int64_t node_end,
                                   int64_t nb_begin, int64_t nb_end, double h,
                                   double* restrict x, double* restrict y,
                                   double* restrict z, double* restrict nu,
                                   double* restrict rho){
  const double inv_h = 1./h;
  const double kernel_constant = w_bspline_3d_constant(h);

  for(int64_t ii=node_begin;ii<node_end;ii+=1){ // Iterate over the ii index of the chunk
    double xii = x[ii];                         // Load the X component of the ii particle position
    double yii = y[ii];                         // Load the Y component of the ii particle position
    double zii = z[ii];                         // Load the Z component of the ii particle position
    double rhoii = 0.0;                         // Initialize the chunk contribution to the density

    #pragma omp simd                            // Hint at the compiler to vectorize
    for(int64_t jj=nb_begin;jj<nb_end;jj+=1){   // Iterate over each other particle in the jj loop
      double q = 0.;                            // Initialize the distance
      double xij = xii-x[jj];                   // Load and subtract the jj particle's X position component
      double yij = yii-y[jj];                   // Load and subtract the jj particle's Y position component
      double zij = zii-z[jj];                   // Load and subtract the jj particle's Z position component

      q += xij*xij;                             // Add the jj contribution to the ii distance in X
      q += yij*yij;                             // Add the jj contribution to the ii distance in Y
      q += zij*zij;                             // Add the jj contribution to the ii distance in Z

      q = sqrt(q)*inv_h;                        // Take the square root and normalize by h

      rhoii += nu[jj]*w_bspline_3d_simd(q);     // Add up the contribution from the jj particle
    }                                           // to the intermediary density, and then
    rho[ii] += rhoii*kernel_constant;           // add the intermediary density to the full density
  }

  return 0;
}

/*
 *  Function w_bspline_3d_constant:
 *    Returns the 3d normalization constant for the cubic b-spline SPH smoothing kernel
 *
 *    Arguments:
 *       h <double>           : Smoothing Length for the Smoothing Kernel w_bspline
 *    Returns:
 *       3d bspline normalization constant <double>
 */
double w_bspline_3d_constant(double h){
  return 3./(2.*M_PI*h*h*h); // 3d normalization value for the b-spline kernel
}
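/*
 * For reference, the kernel implemented next is the standard cubic B-spline
 * in its branchless form. With q = |r|/h:
 *
 *    w(q) = 2/3 - q^2 + q^3/2     for 0 <= q < 1
 *    w(q) = (2 - q)^3 / 6         for 1 <= q < 2
 *    w(q) = 0                     for q >= 2
 *
 * and the full kernel is W(r,h) = sigma_3d * w(q), where
 * sigma_3d = 3/(2*pi*h^3) is the constant returned above. These are exactly
 * the coefficients 0.666..., 0.5 and 0.1666... that appear in the code below.
 */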
/*
 *  Function w_bspline_3d_simd:
 *    Returns the un-normalized value of the cubic b-spline SPH smoothing kernel
 *
 *    Arguments:
 *       q <double>           : Distance between particles normalized by the smoothing length h
 *    Returns:
 *       wq <double>          : Unnormalized value of the kernel
 *
 *    Observation:
 *       Why not else if(q<2.)?
 *       Because if you use "else if", the compiler refuses to vectorize,
 *       which results in a large slowdown, around 2.5x slower for example_04.
 */
#pragma omp declare simd
double w_bspline_3d_simd(double q){
  double wq = 0;
  double wq1 = (0.6666666666666666 - q*q + 0.5*q*q*q);   // The first polynomial of the spline
  double wq2 = 0.16666666666666666*(2.-q)*(2.-q)*(2.-q); // The second polynomial of the spline

  if(q<2.)      // If the distance is below 2
    wq = wq2;   // use the 2nd polynomial for the spline

  if(q<1.)      // If the distance is below 1
    wq = wq1;   // use the 1st polynomial for the spline

  return wq;    // Return whichever value corresponds to the distance
}
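/*
 * To make the "Observation" above concrete, this is the hypothetical
 * else-if rewrite being warned against. It computes the same values, but
 * the early-exit control flow is what blocks vectorization of the caller's
 * inner loop, per the note above. It is included only for comparison and
 * is not used.
 */
static double w_bspline_3d_branchy(double q){
  if(q < 1.)
    return (0.6666666666666666 - q*q + 0.5*q*q*q);    // first polynomial
  else if(q < 2.)
    return 0.16666666666666666*(2.-q)*(2.-q)*(2.-q);  // second polynomial
  else
    return 0.;                                        // outside the support
}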
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class 
CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token; all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients should make sure all calls to get() with the same
  /// location happen while the function_ref is alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be
  /// reported as a side-effect (only if the completion point has been
  /// reached).
  QualType get(SourceLocation Tok) const {
    if (!Enabled || Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. 
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. 
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. 
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. 
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The namespace where coroutine components are defined. In standard, /// they are defined in std namespace. And in the previous implementation, /// they are defined in std::experimental namespace. NamespaceDecl *CoroTraitsNamespaceCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. 
bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// In addition of being constant evaluated, the current expression /// occurs in an immediate function context - either a consteval function /// or a consteval if function. ImmediateFunctionContext, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated || Context == ExpressionEvaluationContext::ImmediateFunctionContext; } bool isImmediateFunctionContext() const { return Context == ExpressionEvaluationContext::ImmediateFunctionContext; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? 
NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; class GlobalMethodPool { public: using Lists = std::pair<ObjCMethodList, ObjCMethodList>; using iterator = llvm::DenseMap<Selector, Lists>::iterator; iterator begin() { return Methods.begin(); } iterator end() { return Methods.end(); } iterator find(Selector Sel) { return Methods.find(Sel); } std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) { return Methods.insert(Val); } int count(Selector Sel) const { return Methods.count(Sel); } bool empty() const { return Methods.empty(); } private: llvm::DenseMap<Selector, Lists> Methods; }; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. 
In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; /// Increment when we find a reference; decrement when we find an ignored /// assignment. Ultimately the value is 0 if every reference is an ignored /// assignment. 
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments; Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. 
operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determine /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether deferrable diagnostics should be deferred. bool DeferDiags = false; /// RAII class to control scope of DeferDiags. class DeferDiagsRAII { Sema &S; bool SavedDeferDiags = false; public: DeferDiagsRAII(Sema &S, bool DeferDiags) : S(S), SavedDeferDiags(S.DeferDiags) { S.DeferDiags = DeferDiags; } ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; } }; /// Whether an uncompilable error has occurred. This includes errors that /// happen in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type.
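/// For example, for a scalar type this is typically a suffix such as
/// " = 0" (or " = nullptr" for a pointer in C++11 and later); the exact
/// spelling depends on the type and language mode, so treat these as
/// illustrations rather than guarantees.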
std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void setFunctionHasMustTail(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. 
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// Retrieve the current function, if any, that should be analyzed for /// potential availability violations. sema::FunctionScopeInfo *getCurFunctionAvailabilityContext(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. 
The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
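/// A minimal sketch of a concrete diagnoser (hypothetical, for illustration;
/// most callers use the BoundTypeDiagnoser template declared below instead):
/// \code
///   struct MyIncompleteTypeDiagnoser : Sema::TypeDiagnoser {
///     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
///       S.Diag(Loc, SomeIncompleteTypeDiagID) << T; // DiagID is assumed
///     }
///   };
/// \endcode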
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. 
For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if their /// addresses are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading non-modular PCH files, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); // Returns the underlying type of a decltype with the given expression. 
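// For example, given 'int x;', decltype applied to the unparenthesized
// id-expression 'x' yields 'int' (the declared type), while 'decltype((x))'
// yields 'int &' because the parenthesized expression is an lvalue.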
QualType getDecltypeForExpr(Expr *E); QualType BuildTypeofExprType(Expr *E); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
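/// For example (illustrative): when the parser sees 'vector<int> v;' and
/// 'vector' names a class template, classifying 'vector' with NextToken '<'
/// yields NC_TypeTemplate; an identifier that cannot be resolved and cannot
/// be typo-corrected yields NC_Unknown.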
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
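/// For example (an illustrative sketch; the real heuristics are narrower):
/// \code
///   int i = 0;
///   auto f = [i]() mutable { int i = 1; i = 2; }; // assigns the shadowing 'i'
/// \endcode
/// where the assignment probably meant to touch the captured variable.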
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
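/// For example (illustrative; in C under Objective-C ARC, where __strong
/// pointer members make a union non-trivial to copy or destroy):
/// \code
///   union U { id obj; };
///   void f(union U u); // flagged with NTCUC_FunctionParam
/// \endcode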
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
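/// For example, 'void f(int n) {}' produces a -Wunused-parameter warning
/// for 'n' when that warning is enabled.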
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the parameters or return value of a /// function or Objective-C method definition are passed by value and larger /// than a specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
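/// For example (illustrative), naming an entity whose definition is owned by
/// an unimported module 'M' produces an error together with a note pointing
/// at 'M' as the module to import.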
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
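/// For example (illustrative), if 'struct X' ends up referring to a typedef
/// named 'X', this returns NTK_Typedef so the resulting diagnostic can say
/// "typedef" instead of a generic phrase.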
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
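/// For example (illustrative): 'T() = default;' is classified as the special
/// member CXXDefaultConstructor, while
/// 'bool operator==(const T &) const = default;' is classified as the
/// comparison kind DefaultedComparisonKind::Equal.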
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,

    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,

    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,

    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,

    /// Merge availability attributes for an implementation of
    /// an optional protocol requirement.
    AMK_OptionalProtocolImplementation
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,

    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,

    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Return true if a new attribute was added.
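  /// Illustrative sketch (not part of the original header): under
  /// AMK_Redeclaration a repeated availability attribute must match exactly,
  /// so a pair such as the following merges cleanly:
  /// \code
  ///   void f() __attribute__((availability(macos, introduced=10.10)));
  ///   void f() __attribute__((availability(macos, introduced=10.10)));
  /// \endcode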
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
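  /// Illustrative sketch (not from the original header) of the three
  /// outcomes below:
  /// \code
  ///   void f(int);
  ///   void f(double); // Ovl_Overload: a different signature
  ///   void f(int);    // Ovl_Match: same signature as an existing decl
  /// \endcode
  /// Declaring a function named \c g where \c g is already a variable would
  /// yield Ovl_NonFunction.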
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
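  /// Illustrative sketch (not part of the original header):
  /// \code
  ///   const int &R = 42;                 // OK: temporary lifetime extended
  ///   const int &Dangle() { return 42; } // diagnosed: the reference would
  ///                                      // outlive the temporary
  /// \endcode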
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
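  /// Illustrative sketch (not from the original header): a switch condition
  /// is contextually converted to an integral or enumeration type, so a
  /// user-defined conversion can be selected here:
  /// \code
  ///   struct E { operator int() const; };
  ///   void f(E V) { switch (V) { default: break; } } // uses E::operator int
  /// \endcode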
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = true,
                            bool AllowExplicitConversion = false,
                            ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                            ConversionSequenceList EarlyConversions = None,
                            OverloadCandidateParamOrder PO = {});
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet,
                             TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                             bool SuppressUserConversions = false,
                             bool PartialOverloading = false,
                             bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversion = false,
                          OverloadCandidateParamOrder PO = {});
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false,
                                  OverloadCandidateParamOrder PO = {});
  void AddTemplateOverloadCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
      bool PartialOverloading = false, bool AllowExplicit = true,
      ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
      OverloadCandidateParamOrder PO = {});
  bool CheckNonDependentConversions(
      FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
      ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
      ConversionSequenceList &Conversions, bool SuppressUserConversions,
      CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
      Expr::Classification ObjectClassification = {},
      OverloadCandidateParamOrder PO = {});
  void AddConversionCandidate(
      CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
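  /// Illustrative sketch (not part of the original header): the address of a
  /// function with an argument-dependent enable_if condition cannot be taken,
  /// since the condition is evaluated per call:
  /// \code
  ///   int pick(int N) __attribute__((enable_if(N > 0, "positive only")));
  ///   auto *P = &pick; // diagnosed: non-tautological enable_if condition
  /// \endcode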
  bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                         bool Complain = false,
                                         SourceLocation Loc = SourceLocation());

  // [PossiblyAFunctionType]  -->   [Return]
  // NonFunctionType --> NonFunctionType
  // R (A) --> R(A)
  // R (*)(A) --> R (A)
  // R (&)(A) --> R (A)
  // R (S::*)(A) --> R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                     QualType TargetType,
                                     bool Complain,
                                     DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);

  bool resolveAndFixAddressOfSingleOverloadCandidate(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
      bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E,
                                       DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);
  void AddOverloadedCallCandidates(
      LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
      ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection = true,
                                     bool CalleesAddressIsTaken = false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
                                        NestedNameSpecifierLoc NNSLoc,
                                        DeclarationNameInfo DNI,
                                        const UnresolvedSetImpl &Fns,
                                        bool PerformADL = true);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     UnaryOperatorKind Opc,
                                     const UnresolvedSetImpl &Fns,
                                     Expr *input, bool RequiresADL = true);

  void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
                             OverloadedOperatorKind Op,
                             const UnresolvedSetImpl &Fns,
                             ArrayRef<Expr *> Args, bool RequiresADL = true);
  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS,
                                   Expr *RHS, bool RequiresADL = true,
                                   bool AllowRewrittenCandidates = true,
                                   FunctionDecl *DefaultedFn = nullptr);
  ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
                                                const UnresolvedSetImpl &Fns,
                                                Expr *LHS, Expr *RHS,
                                                FunctionDecl *DefaultedFn);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base, Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). 
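    /// Illustrative sketch (not from the original header): in
    /// \code
    ///   N::C::f();
    /// \endcode
    /// both \c N and \c C are looked up with this kind, so variables and
    /// functions named \c N or \c C are not considered.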
LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
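    /// Illustrative sketch (not part of the original header) of the matched
    /// form, a string-literal operator template (a compiler extension):
    /// \code
    ///   template <typename CharT, CharT... Cs> int operator""_id();
    ///   int N = "abc"_id; // CharT = char, Cs = <'a', 'b', 'c'>
    /// \endcode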
    LOLR_StringTemplatePack,
  };

  SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                  CXXSpecialMember SM,
                                                  bool ConstArg,
                                                  bool VolatileArg,
                                                  bool RValueThis,
                                                  bool ConstThis,
                                                  bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };

  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC, SourceLocation TypoLoc);

  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
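  /// Illustrative sketch (not from the original header; the names \c SemaRef,
  /// \c CurScope, \c Name, and \c Loc are placeholders):
  /// \code
  ///   NamedDecl *D = SemaRef.LookupSingleName(CurScope, Name, Loc,
  ///                                           Sema::LookupOrdinaryName);
  ///   // D is null for "not found" as well as for ambiguous or overloaded
  ///   // results, which is why the LookupResult-based form is preferable.
  /// \endcode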
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
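  /// Illustrative sketch (not part of the original header): for ill-formed
  /// code such as
  /// \code
  ///   int N = UndeclaredFn(1, 2); // error, but the argument expressions
  ///                               // are preserved in a RecoveryExpr
  /// \endcode
  /// later analyses and tooling can still see the call site and its
  /// sub-expressions despite the error.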
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. 
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type through some means not written in source (e.g. API notes).
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param diagLoc The location to use for diagnostics.
  ///
  /// \param allowArrayTypes Whether to accept nullability specifiers on an
  /// array type (e.g., because it will decay to a pointer).
  ///
  /// \param overrideExisting Whether to override an existing, locally-specified
  /// nullability specifier rather than complaining about the conflict.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                             NullabilityKind nullability,
                                             SourceLocation diagLoc,
                                             bool allowArrayTypes,
                                             bool overrideExisting);

  /// Process the attributes before creating an attributed statement. Returns
  /// the semantic attributes that have been processed.
  void ProcessStmtAttributes(Stmt *Stmt,
                             const ParsedAttributesWithRange &InAttrs,
                             SmallVectorImpl<const Attr *> &OutAttrs);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property setter or
  /// getter and its property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                         SourceLocation AtLoc,
                                         SourceLocation LParenLoc,
                                         FieldDeclarator &FD,
                                         Selector GetterSel,
                                         SourceLocation GetterNameLoc,
                                         Selector SetterSel,
                                         SourceLocation SetterNameLoc,
                                         const bool isReadWrite,
                                         unsigned &Attributes,
                                         const unsigned AttributesAsWritten,
                                         QualType T,
                                         TypeSourceInfo *TSI,
                                         tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
                                          const ObjCImplementationDecl *ImplD,
                                          const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

  /// Returns default addr space for method qualifiers.
  LangAS getDefaultCXXMethodAddrSpace() const;

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// Returns instance or factory methods in the global method pool for the
  /// given selector. It checks the desired kind first; if none is found and
  /// the parameter checkTheOther is set, it then checks the other kind. If no
  /// such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                    bool InstanceFirst, bool CheckTheOther,
                                    const ObjCObjectType *TypeBound = nullptr);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass,
                                      SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// Returns the method that best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);

ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
                               ArrayRef<const Attr *> Attrs,
                               Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  SourceLocation LParenLoc, Stmt *InitStmt,
                                  ConditionResult Cond,
                                  SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                          ConditionResult Cond, SourceLocation RParenLoc,
                          Stmt *Body);
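// A minimal sketch (the names `SemaRef` and `E` are assumed, not part of
// this header) of turning a parsed expression into an expression statement
// via the full-expression wrapper above; MakeFullExpr finishes the
// full-expression before the statement is built:
//
//   Sema::FullExprArg Full = SemaRef.MakeFullExpr(E);
//   StmtResult ES = SemaRef.ActOnExprStmt(Full.release());
//   if (ES.isInvalid())
//     return StmtError();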
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SupressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SupressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr 
*AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. 
/// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. 
These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);

void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType,
                        QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                   UnresolvedSetImpl &NonTemplateOverloads);

/// Try to convert an expression \p E to type \p Ty. Returns the result of
/// the conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);

/// Conditionally issue a diagnostic based on the statement's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                     const PartialDiagnostic &PD);

/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                         const PartialDiagnostic &PD);

/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                         const PartialDiagnostic &PD);

// Primary Expressions.
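// A hedged sketch for the primary-expression callbacks that follow: how a
// caller might materialize the integer literal 42 and wrap it in
// parentheses. `SemaRef` and the SourceLocations `Loc`, `L`, and `R` are
// assumed inputs; this illustrates the Act*/Build* flow only.
//
//   ExprResult Num = SemaRef.ActOnIntegerConstant(Loc, 42);
//   if (!Num.isInvalid()) {
//     ExprResult Paren = SemaRef.ActOnParenExpr(L, R, Num.get());  // "(42)"
//   }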
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const 
TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
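// A hedged sketch of composing `a + b` through the operator callbacks
// below. `SemaRef`, `CurScope`, `TokLoc`, `LHS`, and `RHS` are assumed
// inputs; ActOnBinOp converts the token kind to a BinaryOperatorKind (see
// ConvertTokenKindToBinaryOpcode below) and forwards to BuildBinOp, which
// performs the usual conversions and any needed overload resolution.
//
//   ExprResult Sum =
//       SemaRef.ActOnBinOp(CurScope, TokLoc, tok::plus, LHS, RHS);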
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false,
                         bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                           MultiExprArg CallArgs);

enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                SourceLocation RParenLoc, MultiExprArg Args,
                AtomicExpr::AtomicOp Op,
                AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                      ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                      Expr *Config = nullptr, bool IsExecConfig = false,
                      ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                         Declarator &D, ParsedType &Ty,
                         SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                               TypeSourceInfo *Ty,
                               SourceLocation RParenLoc,
                               Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult BuildInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax,
                                      ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                 UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc,
                                     ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                             SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc,
                             IdentifierInfo *Alias,
                             CXXScopeSpec &SS,
                             SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc,
                             const LookupResult *R = nullptr,
                             const UsingDecl *UD = nullptr);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation,
    bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
                                     SourceLocation UsingLoc,
                                     SourceLocation EnumLoc,
                                     SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
                                SourceLocation UsingLoc,
                                SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. 
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
                                 CXXConstructorDecl *Constructor);

/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
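/// For example:
/// \code
/// std::logic_error e("boom");
/// throw e; // 'e' is a variable in scope, so the operand may be treated as
///          // an rvalue, allowing the copy to become a move
/// \endcode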
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
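/// For example:
/// \code
/// struct S { static void *operator new(std::size_t); };
/// S *a = new S;   // considers the class-scope allocation function (AFS_Class)
/// S *b = ::new S; // considers only the global scope (AFS_Global)
/// \endcode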
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
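/// For example, the initializer full-expression in
/// \code
/// bool Less = std::string("a") < std::string("b"); // two temporaries to destroy
/// \endcode
/// is surrounded by an ExprWithCleanups covering both temporaries.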
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
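/// For example:
/// \code
/// int x = 4;
/// auto f = [y = x + 1] { return y; }; // 'y' is an init-capture
/// \endcode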
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// Emits a diagnostic if D1 was not at least as constrained as D2, but would /// have been if a pair of the atomic constraints involved had been declared /// in a concept and not repeated in two separate places in code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions is /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise.
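/// For example, given
/// \code
/// template<typename T> concept Addable = requires(T a) { a + a; };
/// template<Addable T> T twice(T t) { return t + t; }
/// \endcode
/// a use such as twice(1) requires Addable<int> to be satisfied.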
bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful; emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful; emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. The type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'.
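/// For example:
/// \code
/// NSNumber *N = @(17 + 25); // boxed numeric expression
/// NSString *S = @("hello"); // boxed C string
/// \endcode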
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
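/// For example:
/// \code
/// class __declspec(dllexport) Widget { void draw(); }; // the attribute
///                                                      // applies to all members
/// \endcode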
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator. /// \param ND The declaration that introduces the name /// std::container::iterator. /// \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
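/// For example:
/// \code
/// struct A { virtual void f() final; };
/// struct B : A { void f(); }; // error: overrides a function marked 'final'
/// \endcode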
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
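/// For example:
/// \code
/// template<typename T> constexpr T pi = T(3.1415926535897932385L);
/// double d = pi<double>; // reference to the 'double' specialization
/// \endcode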
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true,
                               bool *ConstraintsNotSatisfied = nullptr);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                   TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
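/// A source-level construct that reaches this entry point (illustrative):
/// \code
/// template<typename MetaFun, typename T1, typename T2>
/// using apply_t = typename MetaFun::template apply<T1, T2>;
/// \endcode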
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,

  /// The base type of a class type.
  UPPC_BaseType,

  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,

  /// The type of a data member.
  UPPC_DataMemberType,

  /// The size of a bit-field.
  UPPC_BitFieldWidth,

  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// The enumerator value.
  UPPC_EnumeratorValue,

  /// A using declaration.
  UPPC_UsingDeclaration,

  /// A friend declaration.
  UPPC_FriendDeclaration,

  /// A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// An initializer.
  UPPC_Initializer,

  /// A default argument.
  UPPC_DefaultArgument,

  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// The type of an exception.
  UPPC_ExceptionType,

  /// Partial specialization.
  UPPC_PartialSpecialization,

  /// Microsoft __if_exists.
  UPPC_IfExists,

  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// Lambda expression.
  UPPC_Lambda,

  /// Block expression.
  UPPC_Block,

  /// A type constraint.
  UPPC_TypeConstraint,

  // A requirement in a requires-expression.
  UPPC_Requirement,

  // A requires-clause.
  UPPC_RequiresClause,
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(
    SourceLocation Loc, UnexpandedParameterPackContext UPPC,
    ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(
    Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
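/// For example (illustrative source that would be diagnosed):
/// \code
/// template<typename ...Ts> struct S {
///   void f(typename Ts::type); // error: 'Ts' is unexpanded inside the
///                              // nested-name-specifier 'Ts::'
/// };
/// \endcode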
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
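/// For example, \c Ts... in \c std::tuple<Ts...>.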
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
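/// For instance, if the pack \c Ts were bound to {int, float}, the pattern
/// of \c std::tuple<Ts...> would have two arguments in its expansion.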
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
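/// For example (illustrative):
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(0, 0.5); } // T deduced as both 'int' and 'double'
/// \endcode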
TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// The deduced arguments did not satisfy the constraints associated
  /// with the template.
  TDK_ConstraintsNotSatisfied,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
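/// For example, class template argument deduction below relies on an
/// implicit guide synthesized from the constructor (illustrative):
/// \code
/// template<typename T> struct Box { Box(T); };
/// Box b(42); // deduces Box<int>
/// \endcode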
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
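/// For example (illustrative):
/// \code
/// template<typename T, typename U = T*> struct S {};
/// S<int> s; // instantiates the default argument 'U = T*' with T = int
/// \endcode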
DefaultTemplateArgumentInstantiation,

  /// We are instantiating a default argument for a function.
  /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
  /// provides the template arguments as specified.
  DefaultFunctionArgumentInstantiation,

  /// We are substituting explicit template arguments provided for
  /// a function template. The entity is a FunctionTemplateDecl.
  ExplicitTemplateArgumentSubstitution,

  /// We are substituting template arguments determined as part of
  /// template argument deduction for either a class template
  /// partial specialization or a function template. The
  /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
  /// a TemplateDecl.
  DeducedTemplateArgumentSubstitution,

  /// We are substituting prior template arguments into a new
  /// template parameter. The template parameter itself is either a
  /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
  PriorTemplateArgumentSubstitution,

  /// We are checking the validity of a default template argument that
  /// has been used when naming a template-id.
  DefaultTemplateArgumentChecking,

  /// We are computing the exception specification for a defaulted special
  /// member function.
  ExceptionSpecEvaluation,

  /// We are instantiating the exception specification for a function
  /// template which was deferred until it was needed.
  ExceptionSpecInstantiation,

  /// We are instantiating a requirement of a requires expression.
  RequirementInstantiation,

  /// We are checking the satisfaction of a nested requirement of a requires
  /// expression.
  NestedRequirementConstraintsCheck,

  /// We are declaring an implicit special member function.
  DeclaringSpecialMember,

  /// We are declaring an implicit 'operator==' for a defaulted
  /// 'operator<=>'.
  DeclaringImplicitEqualityComparison,

  /// We are defining a synthesized function (such as a defaulted special
  /// member).
  DefiningSynthesizedFunction,

  // We are checking the constraints associated with a constrained entity or
  // the constraint expression of a concept. This includes the checks that
  // atomic constraints have the type 'bool' and that they can be constant
  // evaluated.
  ConstraintsCheck,

  // We are substituting template arguments into a constraint expression.
  ConstraintSubstitution,

  // We are normalizing a constraint expression.
  ConstraintNormalization,

  // We are substituting into the parameter mapping of an atomic constraint
  // during normalization.
  ParameterMappingSubstitution,

  /// We are rewriting a comparison operator in terms of an operator<=>.
  RewritingOperatorAsSpaceship,

  /// We are initializing a structured binding.
  InitializingStructuredBinding,

  /// We are marking a class as __dllexport.
  MarkingClassDllexported,

  /// Added for Template instantiation observation.
  /// Memoization means we are _not_ instantiating a template because
  /// it is already instantiated (but we entered a context where we
  /// would have had to if it was not already instantiated).
  Memoization
} Kind;

/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;

/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;

/// The entity that is being synthesized.
Decl *Entity;

/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;

/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;

// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
  /// The number of template arguments in TemplateArgs.
  unsigned NumTemplateArgs;

  /// The special member being declared or defined.
  CXXSpecialMember SpecialMember;
};

ArrayRef<TemplateArgument> template_arguments() const {
  assert(Kind != DeclaringSpecialMember);
  return {TemplateArgs, NumTemplateArgs};
}

/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;

/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;

CodeSynthesisContext()
    : Kind(TemplateInstantiation),
      SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
      Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
      DeductionInfo(nullptr) {}

/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema?
It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
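  /// A typical caller-side pattern for these RAII constructors (an
  /// illustrative sketch, not a specific call site):
  /// \code
  /// InstantiatingTemplate Inst(
  ///     *this, Loc, FunctionTemplate, DeducedArgs,
  ///     CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info);
  /// if (Inst.isInvalid())
  ///   return TDK_InstantiationDepth;
  /// \endcode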
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      ConstraintNormalization, NamedDecl *Template,
                      SourceRange InstantiationRange);

struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      ParameterMappingSubstitution, NamedDecl *Template,
                      SourceRange InstantiationRange);

/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      concepts::Requirement *Req,
                      sema::TemplateDeductionInfo &DeductionInfo,
                      SourceRange InstantiationRange = SourceRange());

/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      concepts::NestedRequirement *Req, ConstraintsCheck,
                      SourceRange InstantiationRange = SourceRange());

/// Note that we have finished instantiating this template.
void Clear();

~InstantiatingTemplate() { Clear(); }

/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }

/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                             SourceRange InstantiationRange);

InstantiatingTemplate(Sema &SemaRef,
                      CodeSynthesisContext::SynthesisKind Kind,
                      SourceLocation PointOfInstantiation,
                      SourceRange InstantiationRange, Decl *Entity,
                      NamedDecl *Template = nullptr,
                      ArrayRef<TemplateArgument> TemplateArgs = None,
                      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

InstantiatingTemplate(const InstantiatingTemplate &) = delete;
InstantiatingTemplate &operator=(const InstantiatingTemplate &) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
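/// For example, the operand of \c sizeof is such a context (illustrative):
/// \code
/// int f();
/// int n = sizeof(f()); // f is named but never called or evaluated
/// \endcode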
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); for (const ExpressionEvaluationContextRecord &context : llvm::reverse(ExprEvalContexts)) { if (context.isImmediateFunctionContext()) return true; if (context.isUnevaluated()) return false; } return false; } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
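/// A minimal usage sketch (the surrounding names, such as \c EPI and
/// \c NumParams, are illustrative):
/// \code
/// ExtParameterInfoBuilder Builder;
/// Builder.set(/*index=*/0, FunctionProtoType::ExtParameterInfo());
/// EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode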
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(
    TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
    SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
    Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                              const MultiLevelTemplateArgumentList &TemplateArgs,
                              int indexAdjustment,
                              Optional<unsigned> NumExpansions,
                              bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            TemplateArgumentListInfo &Outputs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
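/// For example (illustrative):
/// \code
/// struct A {
///   int n;
///   friend auto operator<=>(const A&, const A&) = default;
///   // C++20 implicitly declares a matching defaulted operator==,
///   // whose name and return type are formed by this substitution.
/// };
/// \endcode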
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
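///
/// The type arguments and protocol qualifiers handled here correspond to
/// ordinary Objective-C source such as (illustrative only):
/// \code
///   NSArray<NSString *> *names;    // type arguments on the base type
///   id<NSCopying, NSCoding> item;  // protocol-qualified object pointer
/// \endcode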
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
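///
/// For example (illustrative message sends; 'myObject' is a stand-in value):
/// \code
///   [super init];           // ObjCSuperMessage
///   [myObject description]; // ObjCInstanceMessage (receiver is a value)
///   [NSString string];      // ObjCClassMessage ('NSString' is a type name)
/// \endcode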
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
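///
/// The canonical case is a related result type such as 'instancetype'
/// (illustrative):
/// \code
///   @interface Base
///   + (instancetype)create;  // compatible: the result follows the receiver,
///                            // so [Derived create] has type Derived *
///   @end
/// \endcode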
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
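///
/// "Precise" here means none of the value-unsafe fast-math transforms are
/// permitted. For instance, under the reassociation pragma this returns
/// false in the affected scope (illustrative):
/// \code
///   #pragma clang fp reassociate(on)
///   float sum(float a, float b, float c) {
///     return (a + b) + c;  // may now be reassociated to a + (b + c)
///   }
/// \endcode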
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
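///
/// VisContext tracks pragma state of the following form (illustrative):
/// \code
///   #pragma GCC visibility push(hidden)
///   void internal_helper();  // receives hidden visibility
///   #pragma GCC visibility pop
/// \endcode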
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
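///
/// Corresponds to source such as (illustrative; typically on an OpenCL/HIP
/// kernel):
/// \code
///   __attribute__((amdgpu_waves_per_eu(2, 4)))  // Min = 2, Max = 4
///   kernel void k(global int *p);
/// \endcode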
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Lookup 'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. /// \param NumLoops How many nested loops the directive is expecting. 
/// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
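///
/// A sketch of the target-multiversioning case this guards (illustrative):
/// \code
///   __attribute__((target("default"))) int f(void);
///   __attribute__((target("avx2")))    int f(void);   // OK: same type
///   __attribute__((target("sse4.2")))  long f(void);  // incompatible:
///                                                     // return types differ
/// \endcode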
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement. 
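///
/// For example (illustrative OpenMP 5.x source; 'work' and 'n' are
/// stand-ins):
/// \code
///   #pragma omp metadirective \
///       when(device={arch(nvptx64)}: teams distribute parallel for) \
///       default(parallel for)
///   for (int i = 0; i < n; ++i)
///     work(i);
/// \endcode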
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. 
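///
/// For example (illustrative):
/// \code
///   struct Vec { int len; double *data; };
///   #pragma omp declare mapper(struct Vec v) map(v.len, v.data[0:v.len])
/// \endcode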
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. 
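///
/// For a directive such as (illustrative; 'work' and 'n' are stand-ins):
/// \code
///   #pragma omp parallel num_threads(4) if(n > 64)  // -> Clauses
///   { work(); }                                     // -> AStmt
/// \endcode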
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. 
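///
/// For example (illustrative; 'taskA' and 'taskB' are stand-ins):
/// \code
///   #pragma omp parallel sections
///   {
///     #pragma omp section
///     taskA();
///     #pragma omp section
///     taskB();
///   }
/// \endcode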
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. 
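///
/// A 'teams' region is typically strictly nested inside a 'target' region,
/// e.g. (illustrative):
/// \code
///   #pragma omp target
///   #pragma omp teams num_teams(8)
///   #pragma omp distribute
///   for (int i = 0; i < n; ++i)
///     body(i);
/// \endcode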
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp interop'.
  StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp dispatch' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp masked' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp loop' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPGenericLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);
  /// Checks that the specified declaration matches requirements for the linear
  /// decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type,
                             bool IsDeclareSimd = false);
  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Checks '\#pragma omp declare variant' variant function and original
  /// functions after parsing of the associated method/function.
  /// \param DG Function declaration to which declare variant directive is
  /// applied to.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The trait info object representing the match clause.
  /// \param NumAppendArgs The number of omp_interop_t arguments to account for
  /// in checking.
  /// \returns None, if the function/variant function are not compatible with
  /// the pragma, pair of original function/variant ref expression otherwise.
  Optional<std::pair<FunctionDecl *, Expr *>>
  checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                    OMPTraitInfo &TI, unsigned NumAppendArgs,
                                    SourceRange SR);
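  // Example usage (illustrative; the function names are hypothetical): a
  // 'declare variant' of the kind checkOpenMPDeclareVariantFunction
  // validates, pairing a base function with its variant.
  //
  //   double avx_sqrt(double x);
  //   #pragma omp declare variant(avx_sqrt) match(device = {isa("avx512f")})
  //   double base_sqrt(double x);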
  /// Called on well-formed '\#pragma omp declare variant' after parsing of
  /// the associated method/function.
  /// \param FD Function declaration to which declare variant directive is
  /// applied to.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The context traits associated with the function variant.
  /// \param AdjustArgsNothing The list of 'nothing' arguments.
  /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
  /// \param AppendArgs The list of 'append_args' arguments.
  /// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
  /// \param AppendArgsLoc The Location of an 'append_args' clause.
  /// \param SR The SourceRange of the 'declare variant' directive.
  void ActOnOpenMPDeclareVariantDirective(
      FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
      ArrayRef<Expr *> AdjustArgsNothing,
      ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
      ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
      SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
      SourceRange SR);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'align' clause.
  OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'sizes' clause.
  OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'full' clause.
  OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'partial' clause.
  OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);
  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'detach' clause.
  OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
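  // Example usage (illustrative): several single-expression clauses combined
  // on one directive; 'if', 'num_threads', and 'collapse' are routed through
  // ActOnOpenMPIfClause, ActOnOpenMPNumThreadsClause, and
  // ActOnOpenMPCollapseClause respectively.
  //
  //   #pragma omp parallel for if(n > 1024) num_threads(8) collapse(2)
  //   for (int i = 0; i < n; ++i)
  //     for (int j = 0; j < n; ++j)
  //       c[i][j] = a[i][j] + b[i][j];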
  /// Called on well-formed 'when' clause.
  OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'order' clause.
  OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'acq_rel' clause.
  OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'acquire' clause.
  OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'release' clause.
  OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'relaxed' clause.
  OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'init' clause.
  OMPClause *
  ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
                        bool IsTarget, bool IsTargetSync,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation VarLoc, SourceLocation EndLoc);
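  // Example usage (illustrative): 'schedule', 'proc_bind', and 'default'
  // clauses, which are routed through ActOnOpenMPScheduleClause,
  // ActOnOpenMPProcBindClause, and ActOnOpenMPDefaultClause respectively.
  //
  //   #pragma omp parallel for schedule(dynamic, 16) proc_bind(close) \
  //       default(shared)
  //   for (int i = 0; i < n; ++i)
  //     a[i] = a[i] * a[i];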
  /// Called on well-formed 'use' clause.
  OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation VarLoc,
                                  SourceLocation EndLoc);
  /// Called on well-formed 'destroy' clause.
  OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation VarLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'novariants' clause.
  OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'nocontext' clause.
  OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'filter' clause.
  OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
      SourceLocation ExtraModifierLoc,
      ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
      ArrayRef<SourceLocation> MotionModifiersLoc);
  /// Called on well-formed 'inclusive' clause.
  OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'exclusive' clause.
  OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(
      ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
      SourceLocation LPKindLoc, SourceLocation ColonLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ModifierLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'linear' clause.
  OMPClause *
  ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                          SourceLocation ColonLoc, SourceLocation EndLoc);
  /// Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'depobj' pseudo clause.
  OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
                          SourceLocation DepLoc, SourceLocation ColonLoc,
                          ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
                                     Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ModifierLoc,
                                     SourceLocation EndLoc);
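  // Example usage (illustrative): data-sharing clauses handled by the
  // callbacks above; 'reduction', 'firstprivate', and 'lastprivate' reach
  // ActOnOpenMPReductionClause, ActOnOpenMPFirstprivateClause, and
  // ActOnOpenMPLastprivateClause respectively.
  //
  //   int sum = 0, last = 0;
  //   #pragma omp parallel for reduction(+ : sum) firstprivate(bias) \
  //       lastprivate(last)
  //   for (int i = 0; i < n; ++i) {
  //     last = a[i] + bias;
  //     sum += last;
  //   }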
  /// Called on well-formed 'map' clause.
  OMPClause *ActOnOpenMPMapClause(
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc,
      CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
      OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
      SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
      const OMPVarListLocTy &Locs, bool NoDiagnose = false,
      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
  /// Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);
  /// Called on well-formed 'to' clause.
  OMPClause *
  ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                      ArrayRef<SourceLocation> MotionModifiersLoc,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
                      ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'from' clause.
  OMPClause *
  ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                        ArrayRef<SourceLocation> MotionModifiersLoc,
                        CXXScopeSpec &MapperIdScopeSpec,
                        DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
                        ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                        ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);
  /// Called on well-formed 'use_device_addr' clause.
  OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
                                            const OMPVarListLocTy &Locs);
  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);
  /// Called on well-formed 'nontemporal' clause.
  OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Data for list of allocators.
  struct UsesAllocatorsData {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };
  /// Called on well-formed 'uses_allocators' clause.
  OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                            SourceLocation LParenLoc,
                                            SourceLocation EndLoc,
                                            ArrayRef<UsesAllocatorsData> Data);
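  // Example usage (illustrative): 'map' clauses on a target construct reach
  // ActOnOpenMPMapClause, while the motion clauses of 'target update' reach
  // ActOnOpenMPToClause/ActOnOpenMPFromClause.
  //
  //   #pragma omp target map(to: a[0:n]) map(from: b[0:n])
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] + 1;
  //   #pragma omp target update from(b[0:n])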
  /// Called on well-formed 'affinity' clause.
  OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ColonLoc,
                                       SourceLocation EndLoc, Expr *Modifier,
                                       ArrayRef<Expr *> Locators);
  /// Called on a well-formed 'bind' clause.
  OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
                                   SourceLocation KindLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult
  ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                    ExprValueKind VK = VK_PRValue,
                    const CXXCastPath *BasePath = nullptr,
                    CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of an unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This function is a no-op if the operand has a function type
  // or an array type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
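  // Example usage (illustrative sketch in plain C; the comments indicate
  // roughly which conversion each line exercises):
  //
  //   int arr[4];
  //   int *p = arr;        // array-to-pointer decay
  //                        // (DefaultFunctionArrayConversion)
  //   int x = *p;          // lvalue-to-rvalue (DefaultLvalueConversion)
  //   printf("%f", 1.0f);  // default argument promotions for the variadic
  //                        // call: float is promoted to double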
  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check whether the given statement can have musttail applied to it,
  /// issuing a diagnostic and returning false if not. In the success case,
  /// the statement is rewritten to remove implicit nodes from the return
  /// value.
  bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);

private:
  /// Check whether the given statement can have musttail applied to it,
  /// issuing a diagnostic and returning false if not.
  bool checkMustTailAttr(const Stmt *St, const Attr &MTA);

public:
  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  /// Context in which we're performing a usual arithmetic conversion.
  enum ArithConvKind {
    /// An arithmetic operation.
    ACK_Arithmetic,
    /// A bitwise operation.
    ACK_BitwiseOp,
    /// A comparison.
    ACK_Comparison,
    /// A conditional (?:) operator.
    ACK_Conditional,
    /// A compound assignment expression.
    ACK_CompAssign,
  };

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc, ArithConvKind ACK);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatibleFunctionPointer - The assignment is between two function
    /// pointer types that are not compatible, but we accept them as an
    /// extension.
    IncompatibleFunctionPointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For example,
    /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);
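  // Example usage (illustrative): C assignments that map onto some of the
  // conversion kinds above.
  //
  //   int *ip = 0; unsigned *up = 0; char **cpp = 0;
  //   up = ip;                // IncompatiblePointerSign
  //   ip = 42;                // IntToPointer
  //   const char **ccp = cpp; // IncompatibleNestedPointerQualifiers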
  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS, CastKind &Kind,
                                               bool ConvertRHS = true);

  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  ///        for assignability. If a diagnostic is produced, \p RHS will be
  ///        set to ExprError(). Note that this function may still return
  ///        without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  ///        in an audited Core Foundation API and does not need to be checked
  ///        for ARC retain issues.
  /// \param ConvertRHS If \c true, \p RHS will be updated to model the
  ///        conversions necessary to perform the assignment. If \c false,
  ///        \p Diagnose must also be \c false.
  AssignConvertType
  CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
                                   bool Diagnose = true,
                                   bool DiagnoseCFAudited = false,
                                   bool ConvertRHS = true);

  // If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.
  AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                             ExprResult &RHS);

  bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

  bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const ImplicitConversionSequence &ICS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK
                                           = CCK_ImplicitConversion);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence &SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);
  ExprResult PerformQualificationConversion(
      Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
      CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// the following "Check" methods will return a valid/converted QualType
  /// or a null QualType (indicating an error diagnostic was issued).

  /// type checking binary operators (subroutines of CreateBuiltinBinOp).
  QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                           ExprResult &RHS);
  QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                        ExprResult &RHS);
  QualType CheckPointerToMemberOperands( // C++ 5.5
      ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      SourceLocation OpLoc, bool isIndirect);
  QualType CheckMultiplyDivideOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
      bool IsDivide);
  QualType CheckRemainderOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      bool IsCompAssign = false);
  QualType CheckAdditionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, QualType *CompLHSTy = nullptr);
  QualType CheckSubtractionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      QualType *CompLHSTy = nullptr);
  QualType CheckShiftOperands( // C99 6.5.7
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, bool IsCompAssign = false);
  void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
  QualType CheckCompareOperands( // C99 6.5.8/9
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckBitwiseOperands( // C99 6.5.[10...12]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckLogicalOperands( // C99 6.5.[13,14]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  // CheckAssignmentOperands is used for both simple and compound assignment.
  // For simple assignment, pass both expressions and a null converted type.
  // For compound assignment, pass both expressions and the converted type.
  QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
      Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
      QualType CompoundType);

  ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                     UnaryOperatorKind Opcode, Expr *Op);
  ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                         BinaryOperatorKind Opcode, Expr *LHS,
                                         Expr *RHS);
  ExprResult checkPseudoObjectRValue(Expr *E);
  Expr *recreateSyntacticForm(PseudoObjectExpr *E);

  QualType CheckConditionalOperands( // C99 6.5.15
      ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      ExprObjectKind &OK, SourceLocation QuestionLoc);
  QualType CXXCheckConditionalOperands( // C++ 5.16
      ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK,
      ExprObjectKind &OK, SourceLocation questionLoc);
  QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                       ExprResult &RHS,
                                       SourceLocation QuestionLoc);
  QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                    bool ConvertArgs = true);
  QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1,
                                    ExprResult &E2, bool ConvertArgs = true) {
    Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
    QualType Composite =
        FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
    E1 = E1Tmp;
    E2 = E2Tmp;
    return Composite;
  }

  QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation QuestionLoc);

  bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                  SourceLocation QuestionLoc);

  void DiagnoseAlwaysNonNullPointer(Expr *E,
                                    Expr::NullPointerConstantKind NullType,
                                    bool IsEqual, SourceRange Range);
  /// type checking for vector binary operators.
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  /// Type checking for matrix binary operators.
  QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
                                          SourceLocation Loc,
                                          bool IsCompAssign);
  QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
                                       SourceLocation Loc, bool IsCompAssign);

  bool isValidSveBitcast(QualType srcType, QualType destType);

  bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);

  bool areVectorTypesSameSize(QualType srcType, QualType destType);
  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  // Fake up a scoped enumeration that still contextually converts to bool.
  struct ReferenceConversionsScope {
    /// The conversions that would be performed on an lvalue of type T2 when
    /// binding a reference of type T1 to it, as determined when evaluating
    /// whether T1 is reference-compatible with T2.
    enum ReferenceConversions {
      Qualification = 0x1,
      NestedQualification = 0x2,
      Function = 0x4,
      DerivedToBase = 0x8,
      ObjC = 0x10,
      ObjCLifetime = 0x20,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
    };
  };
  using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               ReferenceConversions *Conv = nullptr);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckMatrixCast - Check type constraints for matrix casts.
  // We allow casting between matrixes of the same dimensions i.e. when they
  // have the same number of rows and columns. Returns true if the cast is
  // invalid.
  bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
                       CastKind &Kind);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid.
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr.
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver,
                                    QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);

  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);

  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
  void EmitRelatedResultTypeNoteForReturn(QualType destType);

  class ConditionResult {
    Decl *ConditionVar;
    FullExprArg Condition;
    bool Invalid;
    bool HasKnownValue;
    bool KnownValue;

    friend class Sema;
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}

  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  static ConditionResult ConditionError() { return ConditionResult(true); }

  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };

  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                                 ConditionKind CK);
  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc, ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return true iff there were any errors
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);

  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an
  /// expression found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr,
                                      bool IsConstexpr = false);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);
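  // Example usage (illustrative): for an 'if constexpr' condition,
  // ConditionResult records the compile-time value, queryable through
  // getKnownValue(), so the untaken branch can be treated accordingly.
  //
  //   template <typename T> int size_class() {
  //     if constexpr (sizeof(T) <= 4) // ConditionKind::ConstexprIf
  //       return 0;
  //     else
  //       return 1;
  //   }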
  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) {}

    virtual SemaDiagnosticBuilder
    diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
    virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                                 SourceLocation Loc) = 0;
    virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
    virtual ~VerifyICEDiagnoser() {}
  };

  enum AllowFoldKind {
    NoFold,
    AllowFold,
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns false on success.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             AllowFoldKind CanFold = NoFold) {
    return VerifyIntegerConstantExpression(E, nullptr, CanFold);
  }

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns false on success.
  /// Can optionally return whether the bit-field is of width 0.
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
  /// key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
  /// same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics
  ///   unless \p EmitOnBothSides is true.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling for
  ///   the device, creates a diagnostic which is emitted if and when we realize
  ///   that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
                                             unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc,
                                           unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder
  diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
                         FunctionDecl *FD);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
                                             unsigned DiagID,
                                             FunctionDecl *FD);

  SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
                                   FunctionDecl *FD = nullptr);
  SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
                                   const PartialDiagnostic &PD,
                                   FunctionDecl *FD = nullptr) {
    return targetDiag(Loc, PD.getDiagID(), FD) << PD;
  }

  /// Check if the type is allowed to be used for the current target.
  void checkTypeSupport(QualType Ty, SourceLocation Loc,
                        ValueDecl *D = nullptr);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  enum CUDAVariableTarget {
    CVT_Device,  /// Emitted on device side with a shadow variable on host side
    CVT_Host,    /// Emitted on host side only
    CVT_Both,    /// Emitted on both sides with different addresses
    CVT_Unified, /// Emitted as a unified address, e.g. managed variables
  };
  /// Determines whether the given variable is emitted on host or device side.
  CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

  /// May add implicit CUDAConstantAttr attribute to VD, depending on VD
  /// and current compilation settings.
  void MaybeAddCUDAConstantAttr(VarDecl *VD);

public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
  ///   be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating duplicate
  ///   deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
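  // Example usage (illustrative): caller/callee combinations and the
  // CUDAFunctionPreference they receive.
  //
  //   __device__ int dev();
  //   __host__ int host();
  //   __host__ __device__ int hd() {
  //     return dev() + host(); // one of the two calls is CFP_WrongSide,
  //                            // depending on which side is being compiled;
  //                            // the matching one is CFP_SameSide.
  //   }
  //   __device__ int caller() { return dev(); } // CFP_Native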
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
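// A minimal sketch of how these hooks are typically driven: when the lexer
// hands the parser the code-completion token, the parser forwards its
// current scope plus a coarse grammatical context so results can be ranked.
// The parser-side names here (`Actions`, `getCurScope()`, `Base`, `OpLoc`,
// `PreferredType`) are assumptions for illustration only; the exact call
// site varies per construct.
//
//   // e.g. at statement scope inside a function body:
//   Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
//
//   // e.g. after `base.` in a member access, with the parsed base expr:
//   Actions.CodeCompleteMemberReferenceExpr(getCurScope(), Base,
//       /*OtherOpBase=*/nullptr, OpLoc, /*IsArrow=*/false,
//       /*IsBaseExprStatement=*/true, PreferredType);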
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool SemaBuiltinElementwiseMathOneArg(CallExpr *TheCall); bool SemaBuiltinReduceMath(CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value.  This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing.  Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;

/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
int ParsingClassDepth = 0;

class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;

  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};

/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                   CharUnits Alignment);

public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action);

/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
  ForThisTarget = 0,
  VariadicFunction,
  ConstructorDestructor,
  BuiltinFunction
};

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
///   function will be emitted for the device, emits the diagnostics
///   immediately.
/// - If CurLexicalContext is a function and we are compiling
///   for the device, but we don't know that this function will be codegen'ed
///   for device yet, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
///     S.getLangOpts().SYCLIsDevice)
///   SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
                                           unsigned DiagID);

/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed, creates a deferred diagnostic to be emitted if
///   and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
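// A minimal usage sketch for EnterExpressionEvaluationContext, assuming a
// Sema instance `S` supplied by the caller: the constructor pushes the
// requested context and the destructor pops it, so the push/pop pair stays
// balanced even on early returns.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze an operand of sizeof/decltype without odr-using it ...
//   } // PopExpressionEvaluationContext() runs here via the destructor.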
image_manipulation.h
////////////////////////////////////////////////////////////////////////// // Software License Agreement (BSD License) // // // // Copyright (c) 2009 // // Engin Tola // // web : http://cvlab.epfl.ch/~tola // // email : engin.tola@epfl.ch // // // // All rights reserved. // // // // Redistribution and use in source and binary forms, with or without // // modification, are permitted provided that the following conditions // // are met: // // // // * Redistributions of source code must retain the above copyright // // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // // copyright notice, this list of conditions and the following // // disclaimer in the documentation and/or other materials provided // // with the distribution. // // * Neither the name of the EPFL nor the names of its // // contributors may be used to endorse or promote products derived // // from this software without specific prior written permission. // // // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // // POSSIBILITY OF SUCH DAMAGE. 
//                                                                      //
// See licence.txt file for more details                               //
//////////////////////////////////////////////////////////////////////////
#ifndef KUTILITY_IMAGE_MANIPULATION_H
#define KUTILITY_IMAGE_MANIPULATION_H

#include "kutility/kutility_def.h"
#include "kutility/general.h"

namespace kutility
{
   template<typename T1, typename T2>
   void scale( T1* src, int h, int w, float sc, T2* dst, int dh, int dw )
   {
      int nh = int( h*sc );
      int nw = int( w*sc );

      assert( dst != NULL );
      assert( nh == dh );
      assert( nw == dw );

      if( sc == 1 )
      {
         for( int i=0; i<h*w; i++ )
            dst[i] = (T2)src[i];
         return;
      }

      double scale_factor = 1.0 / sc;

      memset(dst, 0, sizeof(T2)*dh*dw );

      float y,x;
      for( int ny=0; ny<nh; ny++ )
      {
         y = ny * scale_factor;
         if( y >= h-1 ) continue;
         for( int nx=0; nx<nw; nx++ )
         {
            x = nx * scale_factor;
            if( x >= w-1 ) continue;
            dst[ny*nw+nx] = (T2)bilinear_interpolation(src, w, x, y);
         }
      }
   }

   template<class T> inline
   void rgb_to_y(T* cim, int h, int w, T* gim )
   {
      assert( (gim!=NULL) && (cim!=NULL) );
      for( int y=0; y<h; y++ )
      {
         for( int x=0; x<w; x++ )
         {
            int index=y*w+x;
            float r=cim[3*index  ];
            float g=cim[3*index+1];
            float b=cim[3*index+2];
            gim[index] = T( 0.299*r + 0.587*g + 0.114*b );
         }
      }
   }

   template<class T> inline
   void y_to_rgb(T* yim, int h, int w, T* rgbim )
   {
      assert( rgbim != NULL );
      int wh = w*h;
      for( int k=0; k<wh; k++ )
      {
         rgbim[ 3*k   ] = yim[k];
         rgbim[ 3*k+1 ] = yim[k];
         rgbim[ 3*k+2 ] = yim[k];
      }
   }

   template<class T> inline
   void rgb_to_bgr(T* rgb, int h, int w, T* bgr )
   {
      assert( bgr != NULL );
      int wh3 = w*h*3;
      // copy while swapping the R and B channels; the temporary keeps the
      // swap correct when the call is in place (rgb == bgr).
      for( int k=0; k<wh3; k+=3 )
      {
         T tmp = rgb[k];
         bgr[ k   ] = rgb[ k+2 ];
         bgr[ k+1 ] = rgb[ k+1 ];
         bgr[ k+2 ] = tmp;
      }
   }
   template<class T> inline
   void bgr_to_rgb(T* bgr, int h, int w, T* rgb )
   {
      rgb_to_bgr(bgr,h,w,rgb);
   }

   template<class T> inline
   void rgba_to_y(T* cim, int h, int w, T* gim )
   {
      assert( (gim!=NULL) && (cim!=NULL) );
      for( int y=0; y<h; y++ )
      {
         for( int x=0; x<w; x++ )
         {
            int index=y*w+x;
            float r=cim[4*index  ];
            float g=cim[4*index+1];
            float b=cim[4*index+2];
            gim[index] = T( 0.299*r + 0.587*g + 0.114*b );
         }
      }
   }

   template<class T> inline
   void rgba_to_rgb(T* rgbaim, int h, int w, T* rgbim )
   {
      assert( (rgbim!=NULL) && (rgbaim!=NULL) );
      int wh = w*h;
      for( int k=0; k<wh; k++ )
      {
         rgbim[3*k  ] = rgbaim[4*k  ];
         rgbim[3*k+1] = rgbaim[4*k+1];
         rgbim[3*k+2] = rgbaim[4*k+2];
      }
   }

   uchar* clean_image   (uchar * &image, int w, int h, bool in_place=false);
   uchar* apply_erosion (uchar * &image, int w, int h, bool in_place=false);
   uchar* apply_dilation(uchar * &image, int w, int h, bool in_place=false);
   uchar* down_sample   (uchar * image, int w, int h);
   uchar* resize_image( uchar* &image, int h, int w, int nh, int nw, bool in_place=false);

   /// scales the image intensity between a lower "il" and an upper
   /// "iu" value. "sz" is the image size.
   /// by default il = 0 and iu = 1;
   double* scale_intensity( uchar* image, int sz, double il=0, double iu=1);

   template<class T>
   void decompose_channels( T* image, int h, int w, T* &ch_0, T* &ch_1, T* &ch_2)
   {
      int image_size = h*w;
      ch_0 = kutility::allocate<T>(image_size);
      ch_1 = kutility::allocate<T>(image_size);
      ch_2 = kutility::allocate<T>(image_size);

#if defined(WITH_OPENMP)
#pragma omp parallel for
#endif
      for( int y=0; y<h; y++ )
      {
         int yw = y*w;
         for( int x=0; x<w; x++ )
         {
            int index  = yw+x;
            int cindex = 3*index;
            // the source is interleaved, so each pixel sits at a 3-channel offset
            ch_0[index] = image[cindex  ];
            ch_1[index] = image[cindex+1];
            ch_2[index] = image[cindex+2];
         }
      }
   }

   /// applies gamma correction
   template<class T> inline
   T* gamma_correction( T* im, int h, int w, double gamma, bool in_place=false)
   {
      int sz = w*h;
      T* out;
      if( !in_place ) out = kutility::allocate<T>(sz);
      else            out = im;

      double val;
      for( int i=0; i<sz; i++ )
      {
         val = (pow( (double)im[i], gamma ));
         if( val > 255 ) out[i] = (T)255;
         else            out[i] = (T)val;
      }
      return out;
   }

   /// adds some noise to the pixels
   template<class T> inline
   T* add_noise( T* im, int h, int w, int noise_level, bool in_place=false)
   {
      int sz = w*h;
      T* out;
      if( !in_place ) out = kutility::allocate<T>(sz);
      else            out = im;

      for( int i=0; i<sz; i++ )
      {
         int sign = 1;
         if( rand()/(double)RAND_MAX < 0.5 ) sign = -1;
         out[i] = im[i] + sign * rand()/(double)RAND_MAX * noise_level;
      }
      return out;
   }
}
#endif
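// A minimal usage sketch for the routines above, assuming an interleaved
// w*h*3 RGB buffer `rgb` supplied by the caller and the kutility
// allocate<> helper used throughout this header:
//
//   uchar* gray = kutility::allocate<uchar>(w*h);
//   kutility::rgb_to_y(rgb, h, w, gray);             // luminance extraction
//   int nh = h/2, nw = w/2;                          // assumes even w and h
//   float* half = kutility::allocate<float>(nh*nw);
//   kutility::scale(gray, h, w, 0.5f, half, nh, nw); // bilinear downscale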
THSTensorMath.c
#ifndef THS_GENERIC_FILE #define THS_GENERIC_FILE "generic/THSTensorMath.c" #else #define ROW_PTR2(t, r) (THTensor_(data)(t) + (r) * (t)->stride[0]) #define COL_PTR2(t, c) (THTensor_(data)(t) + (c) * (t)->stride[1]) void THSTensor_(zero)(THSTensor *self) { if (self->indices->nDimension) { THLongTensor_resizeNd(self->indices, 0, NULL, NULL); } if (self->values->nDimension) { THTensor_(resizeNd)(self->values, 0, NULL, NULL); } self->nnz = 0; } void THSTensor_(mul)(THSTensor *r_, THSTensor *t, real value) { if (r_ == t) { THTensor *r_values_ = THSTensor_(newValues)(r_); THTensor_(mul)(r_values_, r_values_, value); THTensor_(free)(r_values_); } else { THSTensor_(resizeAs)(r_, t); THLongTensor *r_indices_ = THSTensor_(newIndices)(r_); THTensor *r_values_ = THSTensor_(newValues)(r_); THLongTensor *t_indices_ = THSTensor_(newIndices)(t); THTensor *t_values_ = THSTensor_(newValues)(t); THLongTensor_resizeAs(r_indices_, t_indices_); THLongTensor_copy(r_indices_, t_indices_); THTensor_(mul)(r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THLongTensor_free(r_indices_); THTensor_(free)(r_values_); THLongTensor_free(t_indices_); THTensor_(free)(t_values_); } } /* floating point only, because that is what TH supports */ /* TODO: add in-place support */ #if defined(THS_REAL_IS_FLOAT) || defined(THS_REAL_IS_DOUBLE) void THSTensor_(pow)(THSTensor *r_, THSTensor *t_, real value) { if (value == 0) { THError("cannot raise to zeroth power on sparse tensor"); } THSTensor* t = THSTensor_(newCoalesce)(t_); THSTensor_(resizeAs)(r_, t); THLongTensor *r_indices_ = THSTensor_(newIndices)(r_); THTensor *r_values_ = THSTensor_(newValues)(r_); THLongTensor *t_indices_ = THSTensor_(newIndices)(t); THTensor *t_values_ = THSTensor_(newValues)(t); THLongTensor_resizeAs(r_indices_, t_indices_); THLongTensor_copy(r_indices_, t_indices_); THTensor_(pow)(r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THLongTensor_free(r_indices_); THTensor_(free)(r_values_); THLongTensor_free(t_indices_); THTensor_(free)(t_values_); THSTensor_(free)(t); } #endif void THSTensor_(div)(THSTensor *r_, THSTensor *t, real value) { if (r_ == t) { THTensor *r_values_ = THSTensor_(newValues)(r_); THTensor_(div)(r_values_, r_values_, value); THTensor_(free)(r_values_); } else { THSTensor_(resizeAs)(r_, t); THLongTensor *r_indices_ = THSTensor_(newIndices)(r_); THTensor *r_values_ = THSTensor_(newValues)(r_); THLongTensor *t_indices_ = THSTensor_(newIndices)(t); THTensor *t_values_ = THSTensor_(newValues)(t); THLongTensor_resizeAs(r_indices_, t_indices_); THLongTensor_copy(r_indices_, t_indices_); THTensor_(div)(r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THLongTensor_free(r_indices_); THTensor_(free)(r_values_); THLongTensor_free(t_indices_); THTensor_(free)(t_values_); } } void THSTensor_(cadd)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) { if(!THSTensor_(isSameSizeAs)(t, src)) { THError("cadd operands have incompatible sizes or dimension types"); } if (src->nnz == 0) { THSTensor_(copy)(r_, t); return; } if (t->nnz == 0) { THSTensor_(mul)(r_, src, value); return; } // saving those because they can be overwritten when doing in-place operations ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz, max_nnz = t_nnz + s_nnz; int64_t nDimI = THSTensor_(nDimensionI)(src); int64_t nDimV = THSTensor_(nDimensionV)(src); THLongTensor *t_indices_ = THSTensor_(newIndices)(t); THTensor *t_values_ = THSTensor_(newValues)(t); THLongTensor *src_indices_ = 
THSTensor_(newIndices)(src);
  THTensor *s_values_ = THSTensor_(newValues)(src);

  THLongTensor *r_indices_ = THLongTensor_newWithSize2d(nDimI, max_nnz);
  THTensor *r_values_ = THSTensor_(newValuesWithSizeOf)(s_values_, max_nnz);
  THTensor_(zero)(r_values_);
  THSTensor_(resizeAs)(r_, src);
  THSTensor_(_move)(r_, r_indices_, r_values_);

  int64_t blockSize = r_values_->stride[0];
  int64_t cmp, d;
  int64_t r_i = 0, t_i = 0, s_i = 0;
  while (t_i < t_nnz || s_i < s_nnz) {
    if (t_i >= t_nnz) {
      cmp = -1;
    } else if (s_i >= s_nnz) {
      cmp = 1;
    } else {
      cmp = 0;
      for (d = 0; d < nDimI; d++) {
        if (THTensor_fastGet2d(t_indices_, d, t_i) < THTensor_fastGet2d(src_indices_, d, s_i)) {
          cmp = 1;
          break;
        }
        if (THTensor_fastGet2d(t_indices_, d, t_i) > THTensor_fastGet2d(src_indices_, d, s_i)) {
          cmp = -1;
          break;
        }
      }
    }
    if (cmp >= 0) {
      for (d = 0; d < nDimI; d++) {
        THTensor_fastSet2d(r_indices_, d, r_i, THTensor_fastGet2d(t_indices_, d, t_i));
      }
      THBlas_(axpy)(blockSize, 1,
        THTensor_(data)(t_values_) + t_i * blockSize, 1,
        THTensor_(data)(r_values_) + r_i * blockSize, 1);
      t_i++;
    }
    if (cmp <= 0) {
      for (d = 0; d < nDimI; d++) {
        THTensor_fastSet2d(r_indices_, d, r_i, THTensor_fastGet2d(src_indices_, d, s_i));
      }
      THBlas_(axpy)(blockSize, value,
        THTensor_(data)(s_values_) + s_i * blockSize, 1,
        THTensor_(data)(r_values_) + r_i * blockSize, 1);
      s_i++;
    }
    r_i++;
  }

  r_->nnz = r_i;
  r_->coalesced = 1;

  THLongTensor_free(t_indices_);
  THTensor_(free)(t_values_);
  THLongTensor_free(src_indices_);
  THTensor_(free)(s_values_);
}

void THSTensor_(csub)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
  THSTensor_(cadd)(r_, t, -value, src);
}

void THSTensor_(cmul)(THSTensor *r_, THSTensor *t_, THSTensor *src_) {
  if(!THSTensor_(isSameSizeAs)(t_, src_)) {
    THError("cmul operands have incompatible sizes or dimension types");
  }

  if (src_->nnz == 0 || t_->nnz == 0) {
    THSTensor_(zero)(r_);
    return;
  }

  THSTensor *t = THSTensor_(newCoalesce)(t_);
  THSTensor *src = THSTensor_(newCoalesce)(src_);

  // saving those because they can be overwritten when doing in-place operations
  ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz;
  ptrdiff_t max_nnz = t_nnz < s_nnz ?
t_nnz : s_nnz; int64_t nDimI = THSTensor_(nDimensionI)(src); int64_t nDimV = THSTensor_(nDimensionV)(src); THLongTensor *t_indices_ = THSTensor_(newIndices)(t); THTensor *t_values_ = THSTensor_(newValues)(t); THLongTensor *src_indices_ = THSTensor_(newIndices)(src); THTensor *s_values_ = THSTensor_(newValues)(src); THLongTensor *r_indices_ = THLongTensor_newWithSize2d(nDimI, max_nnz); THTensor *r_values_ = THSTensor_(newValuesWithSizeOf)(s_values_, max_nnz); THTensor_(zero)(r_values_); THSTensor_(resizeAs)(r_, src); THSTensor_(_move)(r_, r_indices_, r_values_); THTensor *src1Buffer = THTensor_(new)(); THTensor *src2Buffer = THTensor_(new)(); THTensor *dstBuffer = THTensor_(new)(); int64_t match, d; int64_t r_i = 0, t_i = 0, s_i = 0; while (t_i < t_nnz && s_i < s_nnz) { match = 1; for (d = 0; d < nDimI; d++) { if (THTensor_fastGet2d(t_indices_, d, t_i) < THTensor_fastGet2d(src_indices_, d, s_i)) { t_i++; match = 0; break; } if (THTensor_fastGet2d(t_indices_, d, t_i) > THTensor_fastGet2d(src_indices_, d, s_i)) { s_i++; match = 0; break; } } if (!match) continue; for (d = 0; d < nDimI; d++) { THTensor_fastSet2d(r_indices_, d, r_i, THTensor_fastGet2d(t_indices_, d, t_i)); } THSTensor_(mulSlice)(dstBuffer, src1Buffer, src2Buffer, r_values_, t_values_, s_values_, 0, r_i, t_i, s_i); r_i++; t_i++; s_i++; } r_->nnz = r_i; r_->coalesced = 1; THLongTensor_free(t_indices_); THTensor_(free)(t_values_); THLongTensor_free(src_indices_); THTensor_(free)(s_values_); THTensor_(free)(src1Buffer); THTensor_(free)(src2Buffer); THTensor_(free)(dstBuffer); THSTensor_(free)(t); THSTensor_(free)(src); } void THTensor_(spaddcmul)(THTensor *r_, THTensor *t, real value, THSTensor *src1, THSTensor *src2) { THSTensor *intermediate = THSTensor_(new)(); THSTensor_(cmul)(intermediate, src1, src2); THSTensor_(spcadd)(r_, t, value, intermediate); THSTensor_(free)(intermediate); } THLongTensor *THSTensor_(toCSR)(int64_t const *indices, int64_t dim, int64_t nnz) { int64_t h, i, hp0, hp1; THLongTensor *csr = THLongTensor_newWithSize1d(dim + 1); THLongTensor_zero(csr); // Convert the sparse matrix to CSR format #pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000) for (i=0; i<nnz; i++) { hp0 = indices[i]; hp1 = (i+1 == nnz) ? 
dim : indices[i+1]; if (hp0 != hp1) for (h = hp0; h < hp1; h++) { THTensor_fastSet1d(csr, h+1, i+1); } } return csr; } void THSTensor_(spaddmm)(THTensor *r_, real beta, THTensor *t, real alpha, THSTensor *sparse_, THTensor *dense) { int64_t h, i; int64_t dim_i, dim_j, dim_k; // ixj * jxk = ixk int64_t nnz; THLongTensor *csr, *indices; THTensor *values; THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); THSTensor *sparse = THSTensor_(newCoalesce)(sparse_); dim_i = THSTensor_(size)(sparse, 0); dim_j = THSTensor_(size)(sparse, 1); dim_k = THTensor_(size)(dense, 1); THTensor_(resize2d)(r_, dim_i, dim_k); THArgCheck(THTensor_(size)(dense, 0) == dim_j, 3, "Expected dim 0 size %d, got %d", dim_j, THTensor_(size)(dense, 0)); THArgCheck(THTensor_(size)(t, 0) == dim_i, 1, "Expected dim 0 size %d, got %d", dim_i, THTensor_(size)(t, 0)); THArgCheck(THTensor_(size)(t, 1) == dim_k, 1, "Expected dim 1 size %d, got %d", dim_k, THTensor_(size)(t, 1)); nnz = THSTensor_(nnz)(sparse); indices = THSTensor_(newIndices)(sparse); values = THSTensor_(newValues)(sparse); csr = THSTensor_(toCSR)(THLongTensor_data(indices), dim_i, nnz); // r_ = alpha * sparse * dense if (beta == 0) { THTensor_(zero)(r_); } else if (beta == 1) { if (r_ != t) { THTensor_(copy)(r_, t); } } else { THTensor_(mul)(r_, t, beta); } #pragma omp parallel for private(h, i) schedule(static) if (nnz > 10000) for (h = 0; h < dim_i; h++) { int64_t i_start = THTensor_fastGet1d(csr, h); int64_t i_end = THTensor_fastGet1d(csr, h+1); for (i = i_start; i < i_end; i++) { real val = THTensor_fastGet1d(values, i); int64_t col = THTensor_fastGet2d(indices, 1, i); if (col >= 0 && col < dim_j) { THBlas_(axpy)(dim_k, alpha * val, ROW_PTR2(dense, col), dense->stride[1], ROW_PTR2(r_, h), r_->stride[1]); } else { THError("index out of bound. 
spmm: %d not between 1 and %d", col, dim_j); } } } THLongTensor_free(csr); THLongTensor_free(indices); THTensor_(free)(values); THSTensor_(free)(sparse); } void THSTensor_(sspaddmm)(THSTensor *r_, real beta, THSTensor *t, real alpha, THSTensor *sparse_, THTensor *dense) { int64_t h, i, p; int64_t dim_i, dim_j, dim_k; // ixj * jxk = ixk int64_t nnz, r_nnz, t_nnz; THLongTensor *csr, *indices, *newi, *narrowi; THTensor *values, *newv, *narrowv; THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); THSTensor *sparse = THSTensor_(newCoalesce)(sparse_); dim_i = THSTensor_(size)(sparse, 0); dim_j = THSTensor_(size)(sparse, 1); dim_k = THTensor_(size)(dense, 1); THSTensor_(resize2d)(r_, dim_i, dim_k); THArgCheck(THTensor_(size)(dense, 0) == dim_j, 3, "Expected dim 0 size %d, got %d", dim_j, THTensor_(size)(dense, 0)); THArgCheck(THSTensor_(size)(t, 0) == dim_i, 1, "Expected dim 0 size %d, got %d", dim_i, THSTensor_(size)(t, 0)); THArgCheck(THSTensor_(size)(t, 1) == dim_k, 1, "Expected dim 1 size %d, got %d", dim_k, THSTensor_(size)(t, 1)); nnz = THSTensor_(nnz)(sparse); indices = THSTensor_(newIndices)(sparse); values = THSTensor_(newValues)(sparse); csr = THSTensor_(toCSR)(THLongTensor_data(indices), dim_i, nnz); t_nnz = THSTensor_(nnz)(t); r_nnz = nnz * dim_k + t_nnz; newi = THLongTensor_newWithSize2d(2, r_nnz); newv = THTensor_(newWithSize1d)(r_nnz); THTensor_(zero)(newv); if (t_nnz != 0) { narrowi = THLongTensor_newNarrow(newi, 1, 0, t_nnz); narrowv = THTensor_(newNarrow)(newv, 0, 0, t_nnz); THLongTensor_copy(narrowi, THSTensor_(newIndices)(t)); THTensor_(copy)(narrowv, THSTensor_(newValues)(t)); THTensor_(mul)(newv, newv, beta); THLongTensor_free(narrowi); THTensor_(free)(narrowv); } // sparse = sparse * dense p = t_nnz; for (h = 0; h < dim_i; h++) { int64_t i_start = THTensor_fastGet1d(csr, h); int64_t i_end = THTensor_fastGet1d(csr, h+1); for (i = i_start; i < i_end; i++) { real val = THTensor_fastGet1d(values, i); int64_t col = THTensor_fastGet2d(indices, 1, i); if (col >= 0 && col < dim_j) { THBlas_(axpy)(dim_k, alpha * val, ROW_PTR2(dense, col), dense->stride[1], ROW_PTR2(newv, p), 1); } else { THError("index out of bound. 
sspmm: %d not between 1 and %d", col, dim_j); } } // Fill up the indices with the right values if (i_start != i_end) { for (i = 0; i < dim_k; i++) { THTensor_fastSet2d(newi, 0, p + i, h); THTensor_fastSet2d(newi, 1, p + i, i); } p += dim_k; } } // to avoid a clone r_->indices = newi; r_-> values = newv; r_-> nnz = p; THLongTensor_free(csr); THLongTensor_free(indices); THTensor_(free)(values); THSTensor_(free)(sparse); } void THSTensor_(hspmm)(THSTensor *r_, real alpha, THSTensor *sparse_, THTensor *dense) { THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THSTensor_(size)(sparse_, 0); int64_t k = THSTensor_(size)(sparse_, 1); int64_t n = THTensor_(size)(dense, 1); THArgCheck(THTensor_(size)(dense, 0) == k, 3, "Expected dim 0 size %d, got %d", k, THTensor_(size)(dense, 0)); int64_t size[2] = {m, n}; THSTensor_(rawResize)(r_, 1, 1, size); THSTensor *sparse = THSTensor_(newCoalesce)(sparse_); int64_t nnz = THSTensor_(nnz)(sparse); THLongTensor *indices = THLongTensor_newWithSize2d(1, nnz); // Initialize the sparse matrix that will be used with spaddmm to send rows // from the dense matrix to rows of the output's value tensor THSTensor *newSparse = THSTensor_(newClone)(sparse); THLongTensor *spIndices = THSTensor_(newIndices)(newSparse); THLongTensor *valueIndices = THLongTensor_new(); THLongTensor_select(valueIndices, spIndices, 0, 0); // Compute output indices int64_t i = -1, prevIdx = -1; for (int64_t j = 0; j < nnz; j++) { int64_t currIdx = THTensor_fastGet1d(valueIndices, j); if (currIdx != prevIdx) { THTensor_fastSet2d(indices, 0, ++i, currIdx); prevIdx = currIdx; } THTensor_fastSet1d(valueIndices, j, i); } int64_t outNnz = i + 1; THLongTensor_resize2d(indices, 1, outNnz); THTensor *values = THTensor_(newWithSize2d)(outNnz, n); newSparse->size[0] = outNnz; // Compute output values tensor with sparse * dense multiplication THSTensor_(spaddmm)(values, 0, values, alpha, newSparse, dense); THSTensor_(_move)(r_, indices, values); THSTensor_(free)(newSparse); THLongTensor_free(spIndices); THLongTensor_free(valueIndices); THSTensor_(free)(sparse); } void THSTensor_(spcadd)(THTensor *r_, THTensor *dense, real value, THSTensor *sparse_) { THTensor_(resizeAs)(r_, dense); THSTensor *sparse = THSTensor_(newCoalesce)(sparse_); int64_t k; THLongTensor *indices = THSTensor_(newIndices)(sparse); THTensor *values = THSTensor_(newValues)(sparse); THLongStorage *storage = THSTensor_(newSizeOf)(sparse); int64_t *sizes = storage->data; int64_t nDim = THTensor_(nDimension)(dense); int64_t nDimI = THSTensor_(nDimensionI)(sparse); if (r_ != dense) THTensor_(copy)(r_, dense); if (nDim > nDimI) { THTensor *srcBuffer = THTensor_(new)(); THTensor *dstBuffer = THTensor_(new)(); for (k = 0; k < sparse->nnz; k++) { THTensor_(set)(dstBuffer, r_); for (int64_t d = 0; d < sparse->nDimensionI; d++) { THTensor_(select)(dstBuffer, dstBuffer, 0, THTensor_fastGet2d(indices, d, k)); } THTensor_(select)(srcBuffer, values, 0, k); THTensor_(cadd)(dstBuffer, dstBuffer, value, srcBuffer); } THTensor_(free)(srcBuffer); THTensor_(free)(dstBuffer); } else { #pragma omp parallel for private(k) for (k = 0; k < sparse->nnz; k++) { int64_t index = r_->storageOffset; for (int64_t d = 0; d < sparse->nDimensionI; d++) { index += r_->stride[d] * THTensor_fastGet2d(indices, d, k); } 
r_->storage->data[index] += value * THTensor_fastGet1d(values, k); } } THLongTensor_free(indices); THTensor_(free)(values); THLongStorage_free(storage); THSTensor_(free)(sparse); } #undef ROW_PTR2 #undef COL_PTR2 #endif
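/* A worked note on THSTensor_(toCSR): for a coalesced matrix with sorted
 * row indices, e.g. indices = [0, 0, 1, 3] with dim = 4 and nnz = 4, the
 * kernel produces the row-pointer array
 *
 *   csr = [0, 2, 3, 3, 4]
 *
 * of length dim + 1, so row h owns the nonzeros in [csr[h], csr[h+1]);
 * the empty row 2 shows up as csr[2] == csr[3]. spaddmm and sspaddmm walk
 * the rows through exactly these half-open ranges. */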
groupnorm_tpp.h
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Kirill Voronin (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <libxsmm_sync.h> #include <libxsmm_intrinsics_x86.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif #define BITS_PER_CHAR (8) typedef enum my_gn_fuse { MY_GN_FUSE_NONE = 0, MY_GN_FUSE_RELU = 1, MY_GN_FUSE_ELTWISE = 2, MY_GN_FUSE_ELTWISE_RELU = 3, MY_GN_FUSE_RELU_WITH_MASK = 4, MY_GN_FUSE_ELTWISE_RELU_WITH_MASK = 5 } my_gn_fuse; typedef struct my_gn_fwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint G; libxsmm_blasint H; libxsmm_blasint W; libxsmm_blasint bc; libxsmm_blasint CP; libxsmm_blasint num_HW_blocks; libxsmm_blasint threads; size_t scratch_size; libxsmm_barrier* barrier; libxsmm_matrix_eqn_function func10; libxsmm_meltwfunction_unary reduce_HW_kernel; libxsmm_meltwfunction_unary reduce_rows_kernel; libxsmm_meltwfunction_unary reduce_groups_kernel; libxsmm_meltwfunction_unary all_zero_G_kernel; libxsmm_meltwfunction_unary all_zero_kernel; libxsmm_meltwfunction_binary add_kernel; libxsmm_meltwfunction_unary relu_kernel; libxsmm_meltwfunction_binary ewise_add_kernel; my_gn_fuse fuse_type; } my_gn_fwd_config; typedef struct my_gn_bwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint G; libxsmm_blasint H; libxsmm_blasint W; libxsmm_blasint bc; libxsmm_blasint CP; libxsmm_blasint num_HW_blocks; libxsmm_blasint threads; size_t scratch_size; libxsmm_barrier* barrier; libxsmm_matrix_eqn_function dgamma_func; libxsmm_matrix_eqn_function dbeta_func; libxsmm_matrix_eqn_function db_func; libxsmm_matrix_eqn_function ds_func; libxsmm_matrix_eqn_function din_func; libxsmm_meltwfunction_unary all_zero_kernel; libxsmm_meltwfunction_binary add_kernel; libxsmm_meltwfunction_unary inv_relu_kernel; libxsmm_meltwfunction_unary ewise_copy_kernel; my_gn_fuse fuse_type; } my_gn_bwd_config; my_gn_fwd_config setup_my_gn_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint H, libxsmm_blasint W, libxsmm_blasint G, libxsmm_blasint bc, libxsmm_blasint threads, my_gn_fuse fuse_type ) { my_gn_fwd_config res; libxsmm_blasint ldo = bc; libxsmm_blasint ld = bc; libxsmm_blasint tmp_ld, tmp_ld2; libxsmm_blasint my_eqn10; libxsmm_meltw_unary_shape unary_shape; libxsmm_meltw_binary_shape binary_shape; libxsmm_bitfield unary_flags; libxsmm_bitfield binary_flags; libxsmm_bitfield ternary_flags; libxsmm_datatype dtype = LIBXSMM_DATATYPE_F32; libxsmm_meqn_arg_shape eqn_out_arg_shape; libxsmm_meqn_arg_shape arg_shape[128]; libxsmm_matrix_arg_attributes arg_singular_attr; libxsmm_matrix_eqn_arg_metadata arg_metadata[128]; libxsmm_matrix_eqn_op_metadata op_metadata[128]; arg_singular_attr.type = LIBXSMM_MATRIX_ARG_TYPE_SINGULAR; memset( &res, 0, sizeof(res)); /* setting up some handle values */ res.N = N; res.C = C; res.G = G; res.H = H; res.W = W; res.bc = bc; res.CP = res.C / res.bc; res.num_HW_blocks = (res.H > res.W ? 
res.H : res.W ); res.threads = threads; res.fuse_type = fuse_type; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ ldo = res.G; unary_shape = libxsmm_create_meltw_unary_shape(res.G, 1, res.G, ldo, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.all_zero_G_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags); if ( res.all_zero_G_kernel == NULL) { fprintf( stderr, "JIT for initialization by unary all zero group copy kernel failed for fwd. Bailing...!\n"); exit(-1); } ldo = res.bc; unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, res.bc, ldo, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.all_zero_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags); if ( res.all_zero_kernel == NULL) { /* fixed: previously re-checked all_zero_G_kernel, so a failed all_zero_kernel dispatch went undetected */ fprintf( stderr, "JIT for initialization by unary all zero copy kernel failed for fwd. Bailing...!\n"); exit(-1); } if (res.fuse_type == MY_GN_FUSE_RELU || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU || res.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { if ((res.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) && res.bc % 16 != 0) { fprintf( stderr, "Fused ReLU with a mask does not work for sizes which are not a multiple of 16 (2BYTE limitation). Bailing...!\n"); exit(-1); } unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, dtype, dtype, dtype); unary_flags = ( (res.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) ? LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT : LIBXSMM_MELTW_FLAG_UNARY_NONE); res.relu_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_RELU, unary_shape, unary_flags); if ( res.relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd relu_kernel failed. Bailing...!\n"); exit(-1); } } if (res.fuse_type == MY_GN_FUSE_ELTWISE || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { binary_shape = libxsmm_create_meltw_binary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, ldo, dtype, dtype, dtype); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; res.ewise_add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags); if ( res.ewise_add_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd ewise_add_kernel failed. Bailing...!\n"); exit(-1); } } /* TPPs for reducing X and X2 in HW */ ld = res.bc; tmp_ld = res.bc; unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ld, tmp_ld, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; res.reduce_HW_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD, unary_shape, unary_flags); if ( res.reduce_HW_kernel == NULL) { fprintf( stderr, "JIT for initialization of reduce_HW_kernel failed for fwd. Bailing...!\n"); exit(-1); } binary_shape = libxsmm_create_meltw_binary_shape(res.bc, 1, ld, ld, ld, dtype, dtype, dtype); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; res.add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags); if ( res.add_kernel == NULL) { fprintf( stderr, "JIT for initialization of add_kernel failed for fwd. 
Bailing...!\n"); exit(-1); } /* TPP for reducing groups */ libxsmm_blasint group_size = res.C/res.G; ld = group_size; /* group_size = (CP*bc)/G */ tmp_ld = 1; unary_shape = libxsmm_create_meltw_unary_shape(group_size, 1, ld, tmp_ld, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; res.reduce_groups_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, unary_shape, unary_flags); if ( res.reduce_groups_kernel == NULL) { fprintf( stderr, "JIT for initialization of reduce_groups_kernel failed for fwd. Bailing...!\n"); exit(-1); } ld = res.bc; tmp_ld = 1; unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, ld, tmp_ld, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; res.reduce_rows_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, unary_shape, unary_flags); if ( res.reduce_rows_kernel == NULL) { fprintf( stderr, "JIT for initialization of reduce_rows_kernel failed for fwd. Bailing...!\n"); exit(-1); } /* TPP for forward */ ld = res.bc; tmp_ld = 1; tmp_ld2 = 1; my_eqn10 = libxsmm_matrix_eqn_create(); /* y = (s*x + b)*gamma + beta */ ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[0].eqn_idx = my_eqn10; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, dtype, ternary_flags); ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[1].eqn_idx = my_eqn10; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, dtype, ternary_flags); arg_metadata[0].eqn_idx = my_eqn10; arg_metadata[0].in_arg_pos = 0; arg_shape[0].m = res.bc; /* x = [HW, bc] */ arg_shape[0].n = res.H*res.W /res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn10; arg_metadata[1].in_arg_pos = 1; arg_shape[1].m = res.bc; /* s = [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld; arg_shape[1].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn10; arg_metadata[2].in_arg_pos = 2; arg_shape[2].m = res.bc; /* b = [bc] */ arg_shape[2].n = 1; arg_shape[2].ld = tmp_ld; arg_shape[2].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); arg_metadata[3].eqn_idx = my_eqn10; arg_metadata[3].in_arg_pos = 3; arg_shape[3].m = res.bc; /* gamma = [bc] */ arg_shape[3].n = 1; arg_shape[3].ld = tmp_ld2; arg_shape[3].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr); arg_metadata[4].eqn_idx = my_eqn10; arg_metadata[4].in_arg_pos = 4; arg_shape[4].m = res.bc; /* beta = [bc] */ arg_shape[4].n = 1; arg_shape[4].ld = tmp_ld2; arg_shape[4].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* y = [HW, bc] */ eqn_out_arg_shape.n = res.H*res.W / res.num_HW_blocks; eqn_out_arg_shape.ld = ld; eqn_out_arg_shape.type = dtype; /* libxsmm_matrix_eqn_tree_print( my_eqn10 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn10 ); */ res.func10 = libxsmm_dispatch_matrix_eqn_v2( my_eqn10, eqn_out_arg_shape ); if ( 
res.func10 == NULL) { fprintf( stderr, "JIT for TPP fwd func10 (eqn10) failed. Bailing...!\n"); exit(-1); } /* init scratch (currently not needed for the groupnorm fwd) */ res.scratch_size = 0; return res; } my_gn_bwd_config setup_my_gn_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint H, libxsmm_blasint W, libxsmm_blasint G, libxsmm_blasint bc, libxsmm_blasint threads, my_gn_fuse fuse_type ) { my_gn_bwd_config res; size_t dbeta_N_offset; libxsmm_blasint ldo = bc; libxsmm_blasint ld = bc; libxsmm_blasint tmp_ld2; libxsmm_blasint my_eqn11, my_eqn12, my_eqn13, my_eqn14, my_eqn15; libxsmm_meltw_unary_shape unary_shape; libxsmm_meltw_binary_shape binary_shape; libxsmm_bitfield unary_flags; libxsmm_bitfield binary_flags; libxsmm_bitfield ternary_flags; libxsmm_datatype dtype = LIBXSMM_DATATYPE_F32; libxsmm_meqn_arg_shape eqn_out_arg_shape; libxsmm_meqn_arg_shape arg_shape[128]; libxsmm_matrix_arg_attributes arg_singular_attr; libxsmm_matrix_eqn_arg_metadata arg_metadata[128]; libxsmm_matrix_eqn_op_metadata op_metadata[128]; arg_singular_attr.type = LIBXSMM_MATRIX_ARG_TYPE_SINGULAR; memset( &res, 0, sizeof(res)); /* setting up some handle values */ res.N = N; res.C = C; res.G = G; res.H = H; res.W = W; res.bc = bc; res.CP = res.C / res.bc; res.num_HW_blocks = (res.H > res.W ? res.H : res.W ); res.threads = threads; res.fuse_type = fuse_type; /* when masking is on, bc must be divisible by 8 for compressing the mask into a char array (otherwise strides are wrong for relumask) */ if ( (res.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) && (res.bc % BITS_PER_CHAR != 0)) { fprintf( stderr, "bc = %d is not divisible by BITS_PER_CHAR = %d. Bailing...!\n", res.bc, BITS_PER_CHAR); exit(-1); } /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); ldo = res.bc; unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, res.bc, ldo, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.all_zero_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags); if ( res.all_zero_kernel == NULL) { fprintf( stderr, "JIT for initialization by unary all zero copy kernel failed for bwd. Bailing...!\n"); exit(-1); } ld = res.bc; binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; binary_shape = libxsmm_create_meltw_binary_shape(res.bc, 1, ld, ld, ld, dtype, dtype, dtype); res.add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags); if ( res.add_kernel == NULL) { fprintf( stderr, "JIT for initialization of add_kernel failed for bwd. Bailing...!\n"); exit(-1); } if (res.fuse_type == MY_GN_FUSE_RELU || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU || res.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, dtype, dtype, dtype); unary_flags = ( (res.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) ? LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT : LIBXSMM_MELTW_FLAG_UNARY_NONE); res.inv_relu_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_RELU_INV, unary_shape, unary_flags); if ( res.inv_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd inv_relu_kernel failed. 
Bailing...!\n"); exit(-1); } } if (res.fuse_type == MY_GN_FUSE_ELTWISE || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU || res.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, dtype, dtype, dtype); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.ewise_copy_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, unary_shape, unary_flags); if ( res.ewise_copy_kernel == NULL) { fprintf( stderr, "JIT for TPP bwd ewise_copy_kernel failed. Bailing...!\n"); exit(-1); } } /* Group norm equations */ /* Create MatEq for bwd groupnorm */ ld = res.bc; tmp_ld2 = 1; /* dgamma function */ my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp * a + b) * dout) + dgamma */ binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[0].eqn_idx = my_eqn11; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, dtype, binary_flags); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; op_metadata[1].eqn_idx = my_eqn11; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, dtype, unary_flags); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[2].eqn_idx = my_eqn11; op_metadata[2].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_MUL, dtype, binary_flags); ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[3].eqn_idx = my_eqn11; op_metadata[3].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, dtype, ternary_flags); arg_metadata[0].eqn_idx = my_eqn11; arg_metadata[0].in_arg_pos = 0; arg_shape[0].m = res.bc; /* inp [HW, bc] */ arg_shape[0].n = res.H*res.W /res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn11; arg_metadata[1].in_arg_pos = 1; arg_shape[1].m = res.bc; /* a [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn11; arg_metadata[2].in_arg_pos = 2; arg_shape[2].m = res.bc; /* b [bc] */ arg_shape[2].n = 1; arg_shape[2].ld = tmp_ld2; arg_shape[2].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); arg_metadata[3].eqn_idx = my_eqn11; arg_metadata[3].in_arg_pos = 3; arg_shape[3].m = res.bc; /* dout [HW, bc] */ arg_shape[3].n = res.H*res.W/res.num_HW_blocks; arg_shape[3].ld = ld; arg_shape[3].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr); arg_metadata[4].eqn_idx = my_eqn11; arg_metadata[4].in_arg_pos = 4; arg_shape[4].m = res.bc; /* dgamma [bc] */ arg_shape[4].n = 1; arg_shape[4].ld = tmp_ld2; arg_shape[4].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* dgamma [bc] */ eqn_out_arg_shape.n = 1; eqn_out_arg_shape.ld = tmp_ld2; eqn_out_arg_shape.type = dtype; /* libxsmm_matrix_eqn_tree_print( my_eqn11 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn11 ); */ res.dgamma_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn11, eqn_out_arg_shape ); if ( res.dgamma_func == NULL) { fprintf( stderr, "JIT for TPP bwd dgamma_func (eqn11) failed. 
Bailing...!\n"); exit(-1); } /* dbeta function */ my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [bc] = dout [HW, bc] + dbeta [bc] */ binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[0].eqn_idx = my_eqn12; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, dtype, binary_flags); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; op_metadata[1].eqn_idx = my_eqn12; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, dtype, unary_flags); arg_metadata[0].eqn_idx = my_eqn12; arg_metadata[0].in_arg_pos = 3; arg_shape[0].m = res.bc; /* dout [HW, bc] */ arg_shape[0].n = res.H*res.W/res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn12; arg_metadata[1].in_arg_pos = 5; arg_shape[1].m = res.bc; /* dbeta [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* dbeta [bc] */ eqn_out_arg_shape.n = 1; eqn_out_arg_shape.ld = tmp_ld2; eqn_out_arg_shape.type = dtype; /* libxsmm_matrix_eqn_tree_print( my_eqn12 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn12 ); */ res.dbeta_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn12, eqn_out_arg_shape ); if ( res.dbeta_func == NULL) { fprintf( stderr, "JIT for TPP bwd dbeta_func (eqn12) failed. Bailing...!\n"); exit(-1); } /* db new equation */ my_eqn13 = libxsmm_matrix_eqn_create(); /* db [bc] = (dout * gamma) [HW, bc] + db [bc] */ binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[0].eqn_idx = my_eqn13; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, dtype, binary_flags); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; op_metadata[1].eqn_idx = my_eqn13; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, dtype, unary_flags); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1; op_metadata[2].eqn_idx = my_eqn13; op_metadata[2].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_MUL, dtype, binary_flags); arg_metadata[0].eqn_idx = my_eqn13; arg_metadata[0].in_arg_pos = 3; arg_shape[0].m = res.bc; /* dout [HW, bc] */ arg_shape[0].n = res.H*res.W/res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn13; arg_metadata[1].in_arg_pos = 6; arg_shape[1].m = res.bc; /* gamma [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn13; arg_metadata[2].in_arg_pos = 9; arg_shape[2].m = res.bc; /* db [bc] */ arg_shape[2].n = 1; arg_shape[2].ld = tmp_ld2; arg_shape[2].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* db [bc] */ eqn_out_arg_shape.n = 1; eqn_out_arg_shape.ld = tmp_ld2; eqn_out_arg_shape.type = dtype; /* libxsmm_matrix_eqn_tree_print( my_eqn13 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn13 ); */ res.db_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn13, eqn_out_arg_shape 
); if ( res.db_func == NULL) { fprintf( stderr, "JIT for TPP bwd db_func (eqn13) failed. Bailing...!\n"); exit(-1); } /* ds new equation */ my_eqn14 = libxsmm_matrix_eqn_create(); /* ds [bc] = ((dout * gamma) * inp) [HW, bc] + ds [bc] */ binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[0].eqn_idx = my_eqn14; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, dtype, binary_flags); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; op_metadata[1].eqn_idx = my_eqn14; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, dtype, unary_flags); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[2].eqn_idx = my_eqn14; op_metadata[2].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_MUL, dtype, binary_flags); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1; op_metadata[3].eqn_idx = my_eqn14; op_metadata[3].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_BINARY_MUL, dtype, binary_flags); arg_metadata[0].eqn_idx = my_eqn14; arg_metadata[0].in_arg_pos = 3; arg_shape[0].m = res.bc; /* dout [HW, bc] */ arg_shape[0].n = res.H*res.W/res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn14; arg_metadata[1].in_arg_pos = 6; arg_shape[1].m = res.bc; /* gamma [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn14; arg_metadata[2].in_arg_pos = 0; arg_shape[2].m = res.bc; /* inp [HW, bc] */ arg_shape[2].n = res.H*res.W /res.num_HW_blocks; arg_shape[2].ld = ld; arg_shape[2].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); arg_metadata[3].eqn_idx = my_eqn14; arg_metadata[3].in_arg_pos = 8; arg_shape[3].m = res.bc; /* ds [bc] */ arg_shape[3].n = 1; arg_shape[3].ld = tmp_ld2; arg_shape[3].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* ds [bc] */ eqn_out_arg_shape.n = 1; eqn_out_arg_shape.ld = tmp_ld2; eqn_out_arg_shape.type = dtype; /* libxsmm_matrix_eqn_tree_print( my_eqn14 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn14 ); */ res.ds_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn14, eqn_out_arg_shape ); if ( res.ds_func == NULL) { fprintf( stderr, "JIT for TPP bwd ds_func (eqn14) failed. 
Bailing...!\n"); exit(-1); } /* din equation */ my_eqn15 = libxsmm_matrix_eqn_create(); /* din = ((gamma * a) * dout) + (inp * b + c) */ ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[0].eqn_idx = my_eqn15; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, dtype, ternary_flags); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[2].eqn_idx = my_eqn15; op_metadata[2].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_MUL, dtype, binary_flags); arg_metadata[0].eqn_idx = my_eqn15; arg_metadata[0].in_arg_pos = 6; arg_shape[0].m = res.bc; /* gamma [bc] */ arg_shape[0].n = 1; arg_shape[0].ld = tmp_ld2; arg_shape[0].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn15; arg_metadata[1].in_arg_pos = 1; arg_shape[1].m = res.bc; /* a [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn15; arg_metadata[2].in_arg_pos = 3; arg_shape[2].m = res.bc; /* dout [HW, bc] */ arg_shape[2].n = res.H*res.W/res.num_HW_blocks; arg_shape[2].ld = ld; arg_shape[2].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[1].eqn_idx = my_eqn15; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, dtype, ternary_flags); arg_metadata[3].eqn_idx = my_eqn15; arg_metadata[3].in_arg_pos = 0; arg_shape[3].m = res.bc; /* inp [HW, bc] */ arg_shape[3].n = res.H*res.W /res.num_HW_blocks; arg_shape[3].ld = ld; arg_shape[3].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr); arg_metadata[4].eqn_idx = my_eqn15; arg_metadata[4].in_arg_pos = 2; arg_shape[4].m = res.bc; /* b [bc] */ arg_shape[4].n = 1; arg_shape[4].ld = tmp_ld2; arg_shape[4].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr); arg_metadata[5].eqn_idx = my_eqn15; arg_metadata[5].in_arg_pos = 7; arg_shape[5].m = res.bc; /* c [bc] */ arg_shape[5].n = 1; arg_shape[5].ld = tmp_ld2; arg_shape[5].type = dtype; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[5], arg_shape[5], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* din [HW, bc] */ eqn_out_arg_shape.n = res.H*res.W/res.num_HW_blocks; eqn_out_arg_shape.ld = ld; eqn_out_arg_shape.type = dtype; /* libxsmm_matrix_eqn_tree_print( my_eqn15 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn15 ); */ res.din_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn15, eqn_out_arg_shape ); if ( res.din_func == NULL) { fprintf( stderr, "JIT for TPP bwd din_func (eqn15) failed. 
Bailing...!\n"); exit(-1); } /* init scratch */ dbeta_N_offset = LIBXSMM_UP2(res.CP * res.N * res.bc, 64); res.scratch_size = sizeof(float) * ( dbeta_N_offset /* dbeta_N*/ + LIBXSMM_UP2(res.CP * res.N * res.bc, 64) /*dgamma_N */ ); return res; } void destroy_my_gn_fwd(my_gn_fwd_config* cfg) { libxsmm_barrier_destroy(cfg->barrier); /* when/if libxsmm_matrix_eqn_destroy gets added, destructors for equations should go here */ } void destroy_my_gn_bwd(my_gn_bwd_config* cfg) { libxsmm_barrier_destroy(cfg->barrier); } void my_gn_fwd_exec( my_gn_fwd_config cfg, const float *pinp, const float *pinp_add, const float *pgamma, const float *pbeta, float *mean, float *var, float *pout, unsigned char *prelumask, float eps, int start_tid, int my_tid, void *scratch ) { const libxsmm_blasint N = cfg.N; const libxsmm_blasint CP = cfg.CP; const libxsmm_blasint G = cfg.G; const libxsmm_blasint HW = cfg.H * cfg.W; const libxsmm_blasint CB = cfg.bc; const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here */ const libxsmm_blasint work_dN = CP * N; /* compute chunk size */ const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ? (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_dN = (ltid * chunksize_dN < work_dN) ? (ltid * chunksize_dN) : work_dN; const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN; /* number of tasks that could be run in parallel for 1d blocking over N*/ const libxsmm_blasint work_N = N; /* compute chunk size */ const libxsmm_blasint chunksize_N = (work_N % cfg.threads == 0) ? (work_N / cfg.threads) : ((work_N / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_N = (ltid * chunksize_N < work_N) ? (ltid * chunksize_N) : work_N; const libxsmm_blasint thr_end_N = ((ltid + 1) * chunksize_N < work_N) ? 
((ltid + 1) * chunksize_N) : work_N; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); LIBXSMM_VLA_DECL(4, const float, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */ LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB); LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, CB); /* [CP,CB] */ LIBXSMM_VLA_DECL(2, const float, beta, pbeta, CB); /* [CP,CB] */ LIBXSMM_VLA_DECL(4, const float, inp_add, pinp_add, CP, HW, CB); /* [N, CP, HW, bc] */ float alpha = 0.0f; LIBXSMM_VLA_DECL(4, unsigned char, relumask, prelumask, CP, HW, CB/BITS_PER_CHAR); /* [N, CP, HW, CB/BITS_PER_CHAR] */ int np, group_size; group_size = (CP*CB)/G; libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_meltw_unary_param reduce_HW_param; libxsmm_meltw_unary_param m_reduce_groups_param; libxsmm_meltw_unary_param v_reduce_groups_param; libxsmm_meltw_unary_param all_relu_param; libxsmm_matrix_arg arg_array[5]; libxsmm_matrix_eqn_param eqn_param; memset( &all_zero_param, 0, sizeof(all_zero_param)); memset( &add_param, 0, sizeof(add_param)); memset( &reduce_HW_param, 0, sizeof(reduce_HW_param)); memset( &m_reduce_groups_param, 0, sizeof(m_reduce_groups_param)); memset( &v_reduce_groups_param, 0, sizeof(v_reduce_groups_param)); memset( &all_relu_param, 0, sizeof(all_relu_param)); memset( &eqn_param, 0, sizeof(eqn_param)); eqn_param.inputs = arg_array; if (group_size <= CB){ int cp; int cpxnt; for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { np = cpxnt/CP; cp = cpxnt%CP; LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CB], 64); LIBXSMM_ALIGNED(float b[CB], 64); int i, j, hwb, g; all_zero_param.out.primary = tmp; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &tmp[CB]; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = sum_X; cfg.all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; cfg.all_zero_G_kernel(&all_zero_param); LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); reduce_HW_param.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW_block, CB] -----> [2 * CB] */ cfg.reduce_HW_kernel(&reduce_HW_param); add_param.in0.primary = tmp; add_param.in1.primary = new_tmp; add_param.out.primary = tmp; cfg.add_kernel(&add_param); add_param.in0.primary = &tmp[CB]; add_param.in1.primary = &new_tmp[CB]; add_param.out.primary = &tmp[CB]; cfg.add_kernel(&add_param); /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] += new_tmp[cb]; */ /* } */ } for(i=0; i < CB; i += group_size){ g = (cp*CB + i)/group_size; /* determine current group */ m_reduce_groups_param.in.primary = &tmp[i]; m_reduce_groups_param.out.primary = &sum_X[g]; v_reduce_groups_param.in.primary = &tmp[CB + i]; v_reduce_groups_param.out.primary = &sum_X2[g]; cfg.reduce_groups_kernel(&m_reduce_groups_param); cfg.reduce_groups_kernel(&v_reduce_groups_param); mean[np*G + g] = sum_X[g] / ((float)group_size * HW); var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */ for(j = 0; j < group_size; j++){ s[i + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */ b[i + j] = -1 * mean[np*G + g] * s[i + j]; /* -E[X]/sqrt(var(X) + eps) */ } } arg_array[1].primary = s; /* [CB] */ arg_array[2].primary = b; /* [CB] */ arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, 
CB); /* [CB] */ arg_array[4].primary = (void*)&LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ cfg.func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */ /* Eltwise add */ if (cfg.fuse_type == MY_GN_FUSE_ELTWISE || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { add_param.in0.primary = (void*)&LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); add_param.in1.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp_add, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); add_param.out.primary = (void*)&LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); cfg.ewise_add_kernel(&add_param); } /* ReLU */ if (cfg.fuse_type == MY_GN_FUSE_RELU || cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { all_relu_param.op.primary = (void*)(&alpha); all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ all_relu_param.out.secondary = ((cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) ? (void*)&LIBXSMM_VLA_ACCESS(4, relumask, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, (CB/BITS_PER_CHAR)) : NULL ); cfg.relu_kernel(&all_relu_param); } /* ReLU */ } } } else{ /* Case when group_size > CB */ for ( np = thr_begin_N; np < thr_end_N; ++np ) { LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CP*CB], 64); LIBXSMM_ALIGNED(float b[CP*CB], 64); int i, j, cp, hwb, g; float m, v; libxsmm_meltw_unary_param m_reduce_rows_param; libxsmm_meltw_unary_param v_reduce_rows_param; memset( &m_reduce_rows_param, 0, sizeof(m_reduce_rows_param)); memset( &v_reduce_rows_param, 0, sizeof(v_reduce_rows_param)); all_zero_param.out.primary = sum_X; cfg.all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; cfg.all_zero_G_kernel(&all_zero_param); LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); for (cp = 0; cp < CP; cp++){ /* [cp, HW, CB] */ all_zero_param.out.primary = tmp; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &tmp[CB]; cfg.all_zero_kernel(&all_zero_param); /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] = 0.0f; */ /* } */ reduce_HW_param.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] -----> [2 * CB] */ cfg.reduce_HW_kernel(&reduce_HW_param); add_param.in0.primary = tmp; add_param.in1.primary = new_tmp; add_param.out.primary = tmp; cfg.add_kernel(&add_param); add_param.in0.primary = &tmp[CB]; add_param.in1.primary = &new_tmp[CB]; add_param.out.primary = &tmp[CB]; cfg.add_kernel(&add_param); /* #pragma omp simd */ /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] += new_tmp[cb]; */ /* } */ } if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */ g = (cp*CB)/group_size; /* determine current group */ m_reduce_rows_param.in.primary = tmp; m_reduce_rows_param.out.primary = &m; v_reduce_rows_param.in.primary = 
&tmp[CB]; v_reduce_rows_param.out.primary = &v; cfg.reduce_rows_kernel(&m_reduce_rows_param); cfg.reduce_rows_kernel(&v_reduce_rows_param); sum_X[g] += m; sum_X2[g] += v; } else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */ for(i=0; i < CB; i += group_size){ m_reduce_groups_param.in.primary = &tmp[i]; m_reduce_groups_param.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)]; v_reduce_groups_param.in.primary = &tmp[CB + i]; v_reduce_groups_param.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)]; cfg.reduce_groups_kernel(&m_reduce_groups_param); cfg.reduce_groups_kernel(&v_reduce_groups_param); } } } /* mean and variance calculation */ for(g = 0; g < G; g++){ mean[np*G + g] = sum_X[g] / ((float)group_size * HW); var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */ for(j = 0; j < group_size; j++){ s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */ b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */ } } for (cp = 0; cp < CP; cp++){ arg_array[1].primary = &s[cp*CB]; /* [CB] */ arg_array[2].primary = &b[cp*CB]; /* [CB] */ arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */ arg_array[4].primary = (void*)&LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ cfg.func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */ /* Eltwise add */ if (cfg.fuse_type == MY_GN_FUSE_ELTWISE || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { add_param.in0.primary = (void*)&LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); add_param.in1.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp_add, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); add_param.out.primary = (void*)&LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); cfg.ewise_add_kernel(&add_param); } /* ReLU */ if (cfg.fuse_type == MY_GN_FUSE_RELU || cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { all_relu_param.op.primary = (void*)(&alpha); all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ all_relu_param.out.secondary = ((cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) ? 
(void*)&LIBXSMM_VLA_ACCESS(4, relumask, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, (CB/BITS_PER_CHAR)) : NULL ); cfg.relu_kernel(&all_relu_param); } /* ReLU */ } } } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_gn_bwd_exec( my_gn_bwd_config cfg, float *pdout, const float *pinp, const float *mean, const float *var, const float *pgamma, const unsigned char *prelumask, float *pdin, float *pdin_add, float *pdgamma, float *pdbeta, float eps, int start_tid, int my_tid, void *scratch) { const libxsmm_blasint N = cfg.N; const libxsmm_blasint CP = cfg.CP; const libxsmm_blasint G = cfg.G; const libxsmm_blasint HW = cfg.H * cfg.W; const libxsmm_blasint CB = cfg.bc; const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_dN = N * CP; /* compute chunk size */ const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ? (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_dN = (ltid * chunksize_dN < work_dN) ? (ltid * chunksize_dN) : work_dN; const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN; /* number of tasks that could be run in parallel for 1d blocking over CP */ const libxsmm_blasint work_C = CP; /* compute chunk size */ const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ? (work_C / cfg.threads) : ((work_C / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_C = (ltid * chunksize_C < work_C) ? (ltid * chunksize_C) : work_C; const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? ((ltid + 1) * chunksize_C) : work_C; /* number of tasks that could be run in parallel for 1d blocking over N */ const libxsmm_blasint work_N = N; /* compute chunk size */ const libxsmm_blasint chunksize_N = (work_N % cfg.threads == 0) ? (work_N / cfg.threads) : ((work_N / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_N = (ltid * chunksize_N < work_N) ? (ltid * chunksize_N) : work_N; const libxsmm_blasint thr_end_N = ((ltid + 1) * chunksize_N < work_N) ? 
((ltid + 1) * chunksize_N) : work_N; libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_unary_param all_relu_param; libxsmm_meltw_unary_param ewise_copy_param; libxsmm_matrix_arg arg_array[10]; libxsmm_matrix_eqn_param eqn_param; memset( &all_zero_param, 0, sizeof(all_zero_param)); memset( &all_relu_param, 0, sizeof(all_relu_param)); memset( &ewise_copy_param, 0, sizeof(ewise_copy_param)); memset( &eqn_param, 0, sizeof(eqn_param)); eqn_param.inputs = arg_array; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); int group_size = (CP*CB)/G; const float scale = 1.0f / ((float)group_size * HW); LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB); LIBXSMM_VLA_DECL(4, const float, inp, pinp, CP, HW, CB); LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB); LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, CB); LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB); LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB); LIBXSMM_VLA_DECL(4, float, din_add, pdin_add, CP, HW, CB); /* [N, CP, HW, bc] */ float alpha = 0.0f; LIBXSMM_VLA_DECL(4, const unsigned char, relumask, prelumask, CP, HW, CB/BITS_PER_CHAR); /* [N, CP, HW, CB/BITS_PER_CHAR] */ const libxsmm_blasint dbeta_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + N * CP * CB), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, dgamma_N, ((float*)scratch), CP, CB); /* [N, CP, CB] */ LIBXSMM_ASSUME_ALIGNED(dgamma_N_, 64); LIBXSMM_VLA_DECL(3, float, dbeta_N, ((float*)scratch) + dbeta_N_offset, CP, CB); /* [N, CP, CB] */ LIBXSMM_ASSUME_ALIGNED(dbeta_N_, 64); if (group_size <= CB){ LIBXSMM_ALIGNED(float a[CB], 64); LIBXSMM_ALIGNED(float b[CB], 64); LIBXSMM_ALIGNED(float c[CB], 64); LIBXSMM_ALIGNED(float ds[CB], 64); LIBXSMM_ALIGNED(float db[CB], 64); int np, cp; int cpxnt; for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { np = cpxnt/CP; cp = cpxnt%CP; int j, g, hwb, lg; /* for(j = 0; j < CB; j++){ dgamma_N[np*CP*CB + cp*CB + j] = 0.0f; dbeta_N[np*CP*CB + cp*CB + j] = 0.0f; } */ all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, 0, CP, CB); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, 0, CP, CB); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = ds; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = db; cfg.all_zero_kernel(&all_zero_param); /* compute a and b for each channel from group means and variance */ for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ lg = g - (cp*CB)/group_size; for(j = 0; j < group_size; j++){ a[lg*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); b[lg*group_size + j] = -a[lg*group_size + j]*mean[np*G + g]; } } arg_array[1].primary = a; arg_array[2].primary = b; arg_array[4].primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, 0, CP, CB); arg_array[5].primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, 0, CP, CB); arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); arg_array[8].primary = ds; arg_array[9].primary = db; for(hwb=0; hwb < num_HW_blocks; hwb++){ if (cfg.fuse_type == MY_GN_FUSE_RELU || cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { all_relu_param.op.primary = (void*)(&alpha); all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ all_relu_param.in.secondary = ((cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) ? 
(void*)&LIBXSMM_VLA_ACCESS(4, relumask, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB/8) : NULL /*&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB) */ ); /* dout_fwd ? nonsense? */ all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ cfg.inv_relu_kernel(&all_relu_param); } /* ReLU/mask */ if (cfg.fuse_type == MY_GN_FUSE_ELTWISE || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { ewise_copy_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); ewise_copy_param.out.primary = &LIBXSMM_VLA_ACCESS(4, din_add, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); cfg.ewise_copy_kernel(&ewise_copy_param); } /* Eltwise */ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); eqn_param.output.primary = ds; cfg.ds_func(&eqn_param); eqn_param.output.primary = db; cfg.db_func(&eqn_param); eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, 0, CP, CB); cfg.dgamma_func(&eqn_param); eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, 0, CP, CB); cfg.dbeta_func(&eqn_param); } /* b = (db * mean[nb] - ds) * a * a * a * scale; */ /* c = -b * mean[nb] - db * a * scale; */ for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute b and c for each channel from group means and variance */ lg = g - (cp*CB)/group_size; float gds = 0.0f; float gdb = 0.0f; for(j = 0; j < group_size; j++){ gds += ds[lg*group_size + j]; /* Group ds and db calculation */ gdb += db[lg*group_size + j]; } for(j = 0; j < group_size; j++){ b[lg*group_size + j] = (gdb * mean[np*G + g] - gds) * a[lg*group_size + j] * a[lg*group_size + j] * a[lg*group_size + j] * scale; c[lg*group_size + j] = -b[lg*group_size + j] * mean[np*G + g] - gdb * a[lg*group_size + j] * scale; } } arg_array[1].primary = a; arg_array[2].primary = b; arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); arg_array[7].primary = c; for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); cfg.din_func(&eqn_param); } } libxsmm_barrier_wait(cfg.barrier, ltid); /* not needed? 
*/ for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) { all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); cfg.all_zero_kernel(&all_zero_param); for (np=0; np < N; np++ ) { int cb; for(cb = 0; cb < CB; cb++){ LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, cb, CP, CB); LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, cb, CP, CB); } } } } else { LIBXSMM_ALIGNED(float a[CP*CB], 64); LIBXSMM_ALIGNED(float b[CP*CB], 64); LIBXSMM_ALIGNED(float c[CP*CB], 64); LIBXSMM_ALIGNED(float ds[CP*CB], 64); LIBXSMM_ALIGNED(float db[CP*CB], 64); int np; for ( np = thr_begin_N; np < thr_end_N; ++np ) { int j, g, cp, hwb; /* for(j = 0; j < CP*CB; j++){ */ /* dgamma_N[np*CP*CB + j] = 0.0f; */ /* dbeta_N[np*CP*CB + j] = 0.0f; */ /* } */ for (cp = 0; cp < CP; cp++) { all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, 0, CP, CB); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, 0, CP, CB); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &ds[cp*CB]; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &db[cp*CB]; cfg.all_zero_kernel(&all_zero_param); } for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */ for(j = 0; j < group_size; j++){ a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g]; } } for (cp = 0; cp < CP; cp++) { arg_array[1].primary = &a[cp*CB]; arg_array[2].primary = &b[cp*CB]; arg_array[4].primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, 0, CP, CB); arg_array[5].primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, 0, CP, CB); arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); arg_array[8].primary = &ds[cp*CB]; arg_array[9].primary = &db[cp*CB]; for(hwb=0; hwb < num_HW_blocks; hwb++){ if (cfg.fuse_type == MY_GN_FUSE_RELU || cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { all_relu_param.op.primary = (void*)(&alpha); all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ all_relu_param.in.secondary = ((cfg.fuse_type == MY_GN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) ? (void*)&LIBXSMM_VLA_ACCESS(4, relumask, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB/8) : NULL /*&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB) */ ); /* dout_fwd ? nonsense? 
*/ all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ cfg.inv_relu_kernel(&all_relu_param); } /* ReLU/mask */ if (cfg.fuse_type == MY_GN_FUSE_ELTWISE || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_GN_FUSE_ELTWISE_RELU_WITH_MASK) { ewise_copy_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); ewise_copy_param.out.primary = &LIBXSMM_VLA_ACCESS(4, din_add, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); cfg.ewise_copy_kernel(&ewise_copy_param); } /* Eltwise */ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); eqn_param.output.primary = &ds[cp*CB]; cfg.ds_func(&eqn_param); eqn_param.output.primary = &db[cp*CB]; cfg.db_func(&eqn_param); eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, 0, CP, CB); cfg.dgamma_func(&eqn_param); eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, 0, CP, CB); cfg.dbeta_func(&eqn_param); } } /* b = (db * mean[nb] - ds) * a * a * a * scale; */ /* c = -b * mean[nb] - db * a * scale; */ for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */ float gds = 0.0f; float gdb = 0.0f; for(j = 0; j < group_size; j++){ gds += ds[g*group_size + j]; /* Group ds and db calculation */ gdb += db[g*group_size + j]; } for(j = 0; j < group_size; j++){ b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale; c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale; } } for (cp = 0; cp < CP; cp++) { arg_array[1].primary = &a[cp*CB]; arg_array[2].primary = &b[cp*CB]; arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); arg_array[7].primary = &c[cp*CB]; for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); cfg.din_func(&eqn_param); } } } libxsmm_barrier_wait(cfg.barrier, ltid); int cp; for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) { all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); cfg.all_zero_kernel(&all_zero_param); for (np=0; np < N; np++ ) { int cb; for(cb = 0; cb < CB; cb++){ LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += LIBXSMM_VLA_ACCESS(3, dgamma_N, np, cp, cb, CP, CB); LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += LIBXSMM_VLA_ACCESS(3, dbeta_N, np, cp, cb, CP, CB); } } } } libxsmm_barrier_wait(cfg.barrier, ltid); }
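/*
 * Minimal usage sketch -- not part of the original header; added for
 * illustration. It shows how the fwd config and exec functions above are
 * intended to be driven from an OpenMP parallel region: every thread enters
 * my_gn_fwd_exec with its own my_tid so the libxsmm barrier (created for
 * `threads` participants in setup_my_gn_fwd) can synchronize the team. The
 * wrapper name run_my_gn_fwd_example and the fixed eps value are assumptions.
 * Layouts follow the code above: inp/out are [N, CP, H*W, bc] with CP = C/bc,
 * gamma/beta are [CP, bc], mean/var are [N, G]. With MY_GN_FUSE_NONE no
 * eltwise input or relumask is touched, and the fwd scratch_size is 0, so
 * NULL is passed for those.
 */
static void run_my_gn_fwd_example(libxsmm_blasint N, libxsmm_blasint C,
                                  libxsmm_blasint H, libxsmm_blasint W,
                                  libxsmm_blasint G, libxsmm_blasint bc,
                                  const float *inp, const float *gamma,
                                  const float *beta, float *mean, float *var,
                                  float *out)
{
  int nthreads = 1;
#if defined(_OPENMP)
  nthreads = omp_get_max_threads();
#endif
  {
    my_gn_fwd_config cfg = setup_my_gn_fwd(N, C, H, W, G, bc, nthreads, MY_GN_FUSE_NONE);
#if defined(_OPENMP)
#   pragma omp parallel
#endif
    {
      int tid = 0;
#if defined(_OPENMP)
      tid = omp_get_thread_num();
#endif
      /* each thread of the team participates; start_tid is 0, scratch unused for fwd */
      my_gn_fwd_exec(cfg, inp, NULL, gamma, beta, mean, var, out, NULL,
                     1e-5f, 0 /* start_tid */, tid, NULL /* scratch */);
    }
    destroy_my_gn_fwd(&cfg);
  }
}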
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory( number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads, sizeof(**nexus_info)); if (nexus_info[0] == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) number_threads; i++) { nexus_info[i]=(&nexus_info[0][i]); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % const void *AcquirePixelCachePixels(const Image *image, % MagickSizeType *length,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate const void *AcquirePixelCachePixels(const Image *image, MagickSizeType *length,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((const void *) NULL); *length=cache_info->length; return((const void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
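%
%  A hedged sketch of the intended call pattern (assumes the clone was just
%  created with ClonePixelCache()):
%
%      Cache
%        clone;
%
%      clone=ClonePixelCache(image->cache);
%      ClonePixelCacheMethods(clone,image->cache);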
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. */ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads 2 #define cache_threads(source,destination) \ num_threads(((source)->type == DiskCache) || \ ((destination)->type == DiskCache) || (((source)->rows) < \ (16*GetMagickResourceLimit(ThreadResource))) ? 1 : \ GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? 
\ GetMagickResourceLimit(ThreadResource) : MaxCacheThreads) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->columns*cache_info->number_channels*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads); clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads); if ((cache_nexus == (NexusInfo **) NULL) || (clone_nexus == (NexusInfo **) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->columns*cache_info->number_channels, clone_info->columns*clone_info->number_channels); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_threads(cache_info,clone_info) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; RectangleInfo region; register ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region, clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; /* Mismatched pixel channel map. 
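            Walk the clone's channel order: for each clone channel whose
            counterpart has a defined trait in the source map, copy the
            quantum from the source channel's offset; channels missing from
            the source stay at their zeroed defaults.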
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { register ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_threads(cache_info,clone_info) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region, clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads); clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache == (void *) NULL) return; image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. 
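%  If a coder has registered a destroy handler in the cache methods, that
%  handler is invoked in place of the default tear-down.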
% % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { 
UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
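%
%  A sketch of the expected calling sequence (error handling elided; q and
%  metacontent are assumed declared by the caller):
%
%      q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%      metacontent=GetAuthenticMetacontent(image);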
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
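%
%  A hedged sketch of the intended use (the kernel launch itself is elided;
%  a NULL return means the cache cannot back an OpenCL buffer and the caller
%  should fall back to the CPU path):
%
%      cl_mem
%        buffer;
%
%      buffer=GetAuthenticOpenCLBuffer(image,device,exception);
%      if (buffer == (cl_mem) NULL)
%        { /* CPU fallback */ }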
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; cl_int status; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info->type == UndefinedCache) SyncImagePixelCache((Image *) image,exception); if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); } assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. 
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels corresponding with
%  the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned
%  pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image has corresponding metacontent, call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
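%
%  An illustrative read-modify-write loop (a sketch: declarations are elided
%  and the per-channel arithmetic shown is an arbitrary example):
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(image,QuantumRange-GetPixelRed(image,q),q);
%          q+=GetPixelChannels(image);
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }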
% */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
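%
%  The extent is reported as a pixel count (the area of the most recently
%  selected nexus region, or columns*rows when no region is active), not as
%  a byte count.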
% */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology? */ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->read_mask != cache_info->read_mask) || (image->write_mask != cache_info->write_mask) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. 
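        The epoch is recorded once on first use; on later calls the elapsed
        wall-clock time is compared against the TimeResource limit and a
        fatal TimeLimitExceeded exception is raised once the limit expires.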
*/ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=time((time_t *) NULL); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status != MagickFalse) { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status != MagickFalse) { destroy=MagickTrue; image->cache=clone_image.cache; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. */ image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->type == DiskCache) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. 
% % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y, pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. 
The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
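%
%  Sketch: because the pixel is virtual, the coordinates may lie outside the
%  image bounds (the (-1,-1) request and the edge method here are arbitrary
%  examples):
%
%      Quantum
%        pixel[MaxPixelChannels];
%
%      (void) GetOneVirtualPixelFromCache(image,EdgeVirtualPixelMethod,-1,-1,
%        pixel,exception);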
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
  const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
  const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_metacontent_from_handler= GetVirtualMetacontentFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_metacontent_from_handler= GetAuthenticMetacontentFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated % corresponding with the last call to SetPixelCacheNexusPixels() or % GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. 
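%
%    o cache: the pixel cache.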
% */ MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimized cache tile width in pixels. % % o height: the optimized cache tile height in pixels. 
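%
%  A hedged traversal sketch (the caller must still clamp each tile to the
%  image extent; x and y are assumed declared as ssize_t):
%
%      size_t
%        height,
%        width;
%
%      GetPixelCacheTileSize(image,&width,&height);
%      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) height)
%        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) width)
%          { /* process one width x height tile */ }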
% */ MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *width=2048UL/(cache_info->number_channels*sizeof(Quantum)); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/(cache_info->number_channels*sizeof(Quantum)); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. % % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. % % The format of the GetVirtualMetacontentFromNexus() method is: % % const void *GetVirtualMetacontentFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the meta-content. 
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   M e t a c o n t e n t                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content is not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   F r o m   N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      Quantum *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
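%
%  For example, a caller that needs a 5x5 neighborhood centered on the
%  top-left corner pixel can request coordinates that lie outside the image;
%  the virtual pixel method supplies the out-of-bounds values (a sketch, not
%  MagickCore code; the nexus is assumed to be already acquired):
%
%      p=GetVirtualPixelsFromNexus(image,EdgeVirtualPixelMethod,-2,-2,5UL,5UL,
%        nexus_info,exception);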
%
*/

static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Compute the remainder of dividing offset by extent.  This yields not only
    the quotient (the tile the offset falls in) but also a positive remainder
    within that tile, such that 0 <= remainder < extent.  The method is
    essentially ldiv() with floored rather than the default truncated modulo
    division: for example, offset=-3 with extent=5 gives quotient=-1 and
    remainder=2.
  */
  modulo.quotient=offset/(ssize_t) extent;
  if (offset < 0L)
    modulo.quotient--;
  modulo.remainder=offset-modulo.quotient*(ssize_t) extent;
  return(modulo);
}

MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  RectangleInfo
    region;

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info, exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ s=(unsigned char *) nexus_info->metacontent; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_metacontent,0, cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
          p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
            x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
            exception);
          r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
          break;
        }
        case VerticalTileVirtualPixelMethod:
        {
          if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
            {
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
          x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
          y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
          p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
            x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
            exception);
          r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
          break;
        }
      }
      if (p == (const Quantum *) NULL)
        break;
      (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
        sizeof(*p));
      q+=cache_info->number_channels;
      if ((s != (void *) NULL) && (r != (const void *) NULL))
        {
          (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
          s+=cache_info->metacontent_extent;
        }
      continue;
    }
    /*
      Transfer a run of pixels.
    */
    p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
      (size_t) length,1UL,*virtual_nexus,exception);
    if (p == (const Quantum *) NULL)
      break;
    r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
    (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p));
    q+=length*cache_info->number_channels;
    if ((r != (void *) NULL) && (s != (const void *) NULL))
      {
        (void) memcpy(s,r,(size_t) length);
        s+=length*cache_info->metacontent_extent;
      }
  }
  if (u < (ssize_t) columns)
    break;
}
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l   C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the last
%  call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
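%
%  A typical read-only scan looks like this (an illustrative sketch, not
%  MagickCore code; the row loop and channel arithmetic are assumptions):
%
%      const Quantum
%        *p;
%
%      double
%        total = 0.0;
%
%      ssize_t
%        x,
%        y;
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%        if (p == (const Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          total+=(double) GetPixelRed(image,p);
%          p+=GetPixelChannels(image);
%        }
%      }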
%
%  The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   C a c h e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call
%  to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   N e x u s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsNexus() returns the pixels associated with the specified
%  cache nexus.
%
%  The format of the GetVirtualPixelsNexus() method is:
%
%      const Quantum *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the colormap pixels.
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. % % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, 
cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) (void) posix_fallocate(cache_info->file,offset+1,extent-offset); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) || (AcquireMagickResource(HeightResource,image->rows) == MagickFalse)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) GetImageIndexInList(image)); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->read_mask=image->read_mask; cache_info->write_mask=image->write_mask; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) 
cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,cache_info->length); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) || (cache_info->type == MemoryCache)) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) cache_info->pixels=source_info.pixels; else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ number_pixels*cache_info->number_channels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } RelinquishMagickResource(MemoryResource,cache_info->length); } /* Create pixel cache on disk. */ status=AcquireMagickResource(DiskResource,cache_info->length); if ((status == MagickFalse) || (cache_info->type == DistributedCache)) { DistributeCacheInfo *server_info; if (cache_info->type == DistributedCache) RelinquishMagickResource(DiskResource,cache_info->length); server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } RelinquishMagickResource(DiskResource,cache_info->length); (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { RelinquishMagickResource(DiskResource,cache_info->length); ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if ((status == MagickFalse) && (cache_info->type != MapCache) && (cache_info->type != MemoryCache)) { status=MagickTrue; cache_info->type=DiskCache; } else { status=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->type=DiskCache; cache_info->pixels=source_info.pixels; } else { /* Create file-backed memory-mapped pixel cache. 
            */
            (void) ClosePixelCacheOnDisk(cache_info);
            cache_info->type=MapCache;
            cache_info->mapped=MagickTrue;
            cache_info->metacontent=(void *) NULL;
            if (cache_info->metacontent_extent != 0)
              cache_info->metacontent=(void *) (cache_info->pixels+
                number_pixels*cache_info->number_channels);
            if ((source_info.storage_class != UndefinedClass) &&
                (mode != ReadMode))
              {
                status=ClonePixelCacheRepository(cache_info,&source_info,
                  exception);
                RelinquishPixelCachePixels(&source_info);
              }
            if (image->debug != MagickFalse)
              {
                (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                  MagickPathExtent,format);
                type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                  cache_info->type);
                (void) FormatLocaleString(message,MagickPathExtent,
                  "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                  cache_info->filename,cache_info->cache_filename,
                  cache_info->file,type,(double) cache_info->columns,(double)
                  cache_info->rows,(double) cache_info->number_channels,
                  format);
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                  message);
              }
            return(status == 0 ? MagickFalse : MagickTrue);
          }
      }
    RelinquishMagickResource(MapResource,cache_info->length);
  }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t   P i x e l   C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: a value other than zero attaches to (reads) an existing
%      persistent pixel cache; zero initializes (writes) it.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
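%
%  A minimal usage sketch (illustrative; the filename is an assumption).  On
%  return, *offset has been advanced past the page-aligned extent of this
%  image's pixels, ready for the next image in the file:
%
%      MagickOffsetType
%        offset = 0;
%
%      status=PersistPixelCache(image,"image.cache",MagickFalse,&offset,
%        exception);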
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,
    MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->read_mask=cache_info->read_mask;
  clone_info->write_mask=cache_info->write_mask;
  clone_info->rows=cache_info->rows;
  clone_info->columns=cache_info->columns;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred to the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred to the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
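%
%  Internal callers build the same pattern around a private nexus (a sketch;
%  error handling is elided, and SyncAuthenticPixelCacheNexus() is assumed
%  from cache-private.h rather than shown in this excerpt):
%
%      NexusInfo
%        **nexus_info;
%
%      nexus_info=AcquirePixelCacheNexus(1);
%      q=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
%        nexus_info[0],exception);
%      /* ... initialize the pixels at q ... */
%      status=SyncAuthenticPixelCacheNexus(image,nexus_info[0],exception);
%      nexus_info=DestroyPixelCacheNexus(nexus_info,1);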
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e   A u t h e n t i c   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking QueueAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or meta-content) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
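%
%  A typical write sequence for one row (a sketch, not MagickCore code; the
%  constant red fill is an assumption for illustration):
%
%      Quantum
%        *q;
%
%      register ssize_t
%        x;
%
%      q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q == (Quantum *) NULL)
%        break;
%      for (x=0; x < (ssize_t) image->columns; x++)
%      {
%        SetPixelRed(image,QuantumRange,q);
%        q+=GetPixelChannels(image);
%      }
%      if (SyncAuthenticPixels(image,exception) == MagickFalse)
%        break;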
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register ssize_t y; register unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict p; /* Read meta-content from memory. 
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
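%
%  For example, with a 100-column cache and a nexus region of
%  {x=10, y=4, width=5, height=2}, row 0 of the region begins at linear pixel
%  offset 4*100+10 = 410 and row 1 at offset 510; each row transfers
%  width*number_channels*sizeof(Quantum) bytes into the nexus buffer.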
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register Quantum *magick_restrict q; register ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+offset*cache_info->number_channels; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e   P i x e l   C a c h e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache and returns a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   C h a n n e l s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   E p o c h                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
% % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info, % const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o region: A pointer to the RectangleInfo structure that defines the % region of this particular cache nexus. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info, ExceptionInfo *exception) { if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length)) return(MagickFalse); if (cache_anonymous_memory <= 0) { nexus_info->mapped=MagickFalse; nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) nexus_info->length)); if (nexus_info->cache != (Quantum *) NULL) (void) ResetMagickMemory(nexus_info->cache,0,(size_t) nexus_info->length); } else { nexus_info->mapped=MagickTrue; nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) nexus_info->length); } if (nexus_info->cache == (Quantum *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsPixelCacheAuthentic( const CacheInfo *magick_restrict cache_info, const NexusInfo *magick_restrict nexus_info) { MagickBooleanType status; MagickOffsetType offset; /* Does nexus pixels point directly to in-core cache pixels or is it buffered? */ if (cache_info->type == PingCache) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; status=nexus_info->pixels == (cache_info->pixels+offset* cache_info->number_channels) ? 
MagickTrue : MagickFalse; return(status); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1); } static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info, const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((Quantum *) NULL); if ((region->width == 0) || (region->height == 0)) return((Quantum *) NULL); nexus_info->region=(*region); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; if (number_pixels == 0) return((Quantum *) NULL); if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) { ssize_t x, y; x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1; y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1; if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) && (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) && ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) && ((nexus_info->region.width == cache_info->columns) || ((nexus_info->region.width % cache_info->columns) == 0))))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info, nexus_info); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ length=number_pixels*cache_info->number_channels*sizeof(Quantum); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; if (nexus_info->cache == (Quantum *) NULL) { nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((Quantum *) NULL); } } else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((Quantum *) NULL); } } nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels* cache_info->number_channels); PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info, nexus_info); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. 
A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. 
% % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. */ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=(MagickCLCacheInfo) CopyMagickCLCacheInfo( cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) { image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (status != MagickFalse) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
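%
%  A typical caller never reaches this routine directly; it runs behind the
%  public get/modify/sync pattern.  A sketch of that pattern (illustration
%  only, error handling omitted; SetCacheAlphaChannel() above is a complete
%  in-tree example of the same idiom):
%
%    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%    for (x=0; x < (ssize_t) image->columns; x++)
%    {
%      SetPixelAlpha(image,OpaqueAlpha,q);
%      q+=GetPixelChannels(image);
%    }
%    (void) SyncAuthenticPixels(image,exception);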
% */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. 
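%
%  For the DiskCache case the meta-content plane is stored after the entire
%  pixel plane, so the first byte of a region row lands at (this restates the
%  offset arithmetic used in the implementation below):
%
%    cache_info->offset                                   (cache base)
%      + columns*rows*number_channels*sizeof(Quantum)     (skip pixel plane)
%      + (region.y*columns+region.x)*metacontent_extent   (row origin)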
% % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const unsigned char *magick_restrict p; register ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const Quantum *magick_restrict p; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width* sizeof(Quantum); extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+offset*cache_info->number_channels; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*nexus_info->region.width; q+=cache_info->columns*cache_info->number_channels; } break; } case DiskCache: { /* Write pixels to disk. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
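/*
  Illustrative sketch (not part of MagickCore): the MemoryCache/MapCache
  branches of ReadPixelCachePixels() and WritePixelCachePixels() above are a
  strided row-by-row copy between the full cache frame and the nexus staging
  buffer, collapsing to a single copy when the nexus spans full rows.  A
  stand-alone model of that copy, with quantum_t standing in for Quantum and
  strides measured in quanta:
*/
#if 0  /* example only; never compiled into the cache */
#include <string.h>

typedef unsigned short quantum_t;

static void CopyRegionRows(quantum_t *destination,const quantum_t *source,
  const size_t destination_stride,const size_t source_stride,
  const size_t row_quanta,const size_t rows)
{
  size_t
    y;

  if ((destination_stride == row_quanta) && (source_stride == row_quanta))
    {
      /*
        Contiguous rows: mirrors the length=extent, rows=1UL fast path above.
      */
      (void) memcpy(destination,source,rows*row_quanta*sizeof(*destination));
      return;
    }
  for (y=0; y < rows; y++)
  {
    (void) memcpy(destination,source,row_quanta*sizeof(*destination));
    source+=source_stride;
    destination+=destination_stride;
  }
}
#endif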
Array.h
#ifndef COSMO_UTILS_ARRAY_H
#define COSMO_UTILS_ARRAY_H

#include <string>
#include <utility>
#include <iostream>

#include "TriCubicInterpolator.h"

namespace cosmo
{

template<typename IT, typename RT>
class CosmoArray
{
  public:
    IT nx, ny, nz;
    IT pts = 0;
    std::string name;

    RT* _array = nullptr;

    CosmoArray() {}

    CosmoArray(IT n_in)
    {
      init(n_in, n_in, n_in);
    }

    CosmoArray(IT nx_in, IT ny_in, IT nz_in)
    {
      init(nx_in, ny_in, nz_in);
    }

    ~CosmoArray()
    {
      if(pts > 0)
        delete [] _array;
    }

    void setName(std::string name_in)
    {
      name = name_in;
    }

    void init(IT nx_in, IT ny_in, IT nz_in)
    {
      nx = nx_in;
      ny = ny_in;
      nz = nz_in;
      pts = nx*ny*nz;
      _array = new RT[pts];
#pragma omp parallel for
      for(IT i=0; i<pts; ++i)
      {
        _array[i] = 0.0;
      }
    }

    IT _IT_mod(IT n, IT d) const
    {
      IT mod = n % d;
      if(mod < 0)
        mod += d;
      return mod;
    }

    RT sum()
    {
      RT res = 0.0;
#pragma omp parallel for reduction(+:res)
      for(IT i=0; i<pts; ++i)
      {
        res += _array[i];
      }
      return res;
    }

    RT avg()
    {
      return sum() / (RT)pts;
    }

    RT min()
    {
      RT min_res = 1e100;
#pragma omp parallel for
      for(IT i = 0; i<pts; i++)
      {
#pragma omp critical
        if(_array[i] < min_res)
          min_res = _array[i];
      }
      return min_res;
    }

    RT max()
    {
      RT max_res = -1e100;
#pragma omp parallel for
      for(IT i = 0; i<pts; i++)
      {
#pragma omp critical
        {
          if(_array[i] > max_res)
            max_res = _array[i];
        }
      }
      return max_res;
    }

    RT abs_max()
    {
      RT max_res = 0;
#pragma omp parallel for
      for(IT i = 0; i<pts; i++)
      {
#pragma omp critical
        {
          if(fabs(_array[i]) > max_res)
            max_res = fabs(_array[i]);
        }
      }
      return max_res;
    }

    RT L2_norm()
    {
      RT L2 = 0;
#pragma omp parallel for reduction(+:L2)
      for(IT i=0; i<pts; ++i)
      {
        L2 += _array[i] * _array[i];
      }
      return sqrt(L2);
    }

    IT idx(IT i_in, IT j_in, IT k_in)
    {
      IT i=i_in, j=j_in, k=k_in;

      // periodic wrap-around for out-of-range (including negative) indexes.
      // Using this is slow. Use a macro instead.
      if(i_in < 0 || i_in >= nx) i = (i_in%nx + nx)%nx;
      if(j_in < 0 || j_in >= ny) j = (j_in%ny + ny)%ny;
      if(k_in < 0 || k_in >= nz) k = (k_in%nz + nz)%nz;
      return ( i*ny*nz + j*nz + k );
    }

    RT& operator()(IT i, IT j, IT k)
    {
      IT x = idx(i, j, k);
      return _array[x];
    }

    CosmoArray& operator=(const CosmoArray& other)
    {
      // check for self-assignment
      if(&other == this)
        return *this;

      this->nx = other.nx;
      this->ny = other.ny; // was copied from other.nz (and vice versa); fixed
      this->nz = other.nz;

      // reallocate storage when the sizes differ (the old code copied into
      // whatever buffer happened to be allocated, a buffer overrun when the
      // destination was smaller)
      if(this->pts != other.pts)
      {
        delete [] this->_array;
        this->_array = new RT[other.pts];
      }
      this->pts = other.pts;
      this->name = other.name;
#pragma omp parallel for
      for(IT i=0; i<pts; ++i)
      {
        this->_array[i] = other._array[i];
      }

      return *this;
    }

    RT& operator[](IT idx)
    {
      return _array[idx];
    }

    // Weighted averaging / trilinear interpolation via
    // https://en.wikipedia.org/wiki/Trilinear_interpolation#Method
    RT getInterpolatedValue(RT i_in, RT j_in, RT k_in)
    {
      IT il = i_in < 0 ? (IT) i_in - 1 : (IT) i_in; // Index "left" of i
      RT id = i_in - il; // fractional difference

      IT jl = j_in < 0 ? (IT) j_in - 1 : (IT) j_in; // same as ^ but j
      RT jd = j_in - jl;

      IT kl = k_in < 0 ? (IT) k_in - 1 : (IT) k_in; // same as ^ but k
      RT kd = k_in - kl;

      RT c00 = _array[idx(il, jl, kl)]*(1-id) + _array[idx(il+1, jl, kl)]*id;
      RT c01 = _array[idx(il, jl, kl+1)]*(1-id) + _array[idx(il+1, jl, kl+1)]*id;
      RT c10 = _array[idx(il, jl+1, kl)]*(1-id) + _array[idx(il+1, jl+1, kl)]*id;
      RT c11 = _array[idx(il, jl+1, kl+1)]*(1-id) + _array[idx(il+1, jl+1, kl+1)]*id;

      RT c0 = c00*(1-jd) + c10*jd;
      RT c1 = c01*(1-jd) + c11*jd;

      return c0*(1-kd) + c1*kd;
    }

    // Catmull-Rom cubic spline
    RT CINT(RT u, RT p0, RT p1, RT p2, RT p3)
    {
      return 0.5*( (u*u*(2.0 - u) - u)*p0 + (u*u*(3.0*u - 5.0) + 2)*p1
          + (u*u*(4.0 - 3.0*u) + u)*p2 + u*u*(u - 1.0)*p3 );
    }

    RT getTriCubicInterpolatedValue(RT i_in, RT j_in, RT k_in)
    {
      IT il = i_in < 0 ?
(IT) i_in - 1 : (IT) i_in; // Index "left" of i RT id = i_in - il; // fractional difference // special 1d case if(ny==1 && nz==1) { return CINT(id, _array[idx(il-1, 0, 0)], _array[idx(il, 0, 0)], _array[idx(il+1, 0, 0)], _array[idx(il+2, 0, 0)]); } IT jl = j_in < 0 ? (IT) j_in - 1 : (IT) j_in; // same as ^ but j RT jd = j_in - jl; IT kl = k_in < 0 ? (IT) k_in - 1 : (IT) k_in; // same as ^ but k RT kd = k_in - kl; // interpolated value at (i*, j*, k_in) RT * F_i_j_kd = new RT[16]; for(IT i=0; i<4; ++i) for(IT j=0; j<4; ++j) F_i_j_kd[i*4+j] = CINT(kd, _array[idx(il+i-1, jl+j-1, kl-1)], _array[idx(il+i-1, jl+j-1, kl+0)], _array[idx(il+i-1, jl+j-1, kl+1)], _array[idx(il+i-1, jl+j-1, kl+2)]); // interpolated value at (i*, j_in, k_in) RT * F_i_jd_kd = new RT[4]; for(IT i=0; i<4; ++i) F_i_jd_kd[i] = CINT(jd, F_i_j_kd[i*4+0], F_i_j_kd[i*4+1], F_i_j_kd[i*4+2], F_i_j_kd[i*4+3]); // interpolated value at (i_in, j_in, k_in) RT Fijk = CINT(id, F_i_jd_kd[0], F_i_jd_kd[1], F_i_jd_kd[2], F_i_jd_kd[3]); delete [] F_i_j_kd; delete [] F_i_jd_kd; return Fijk; } }; template <class T> void cosmoArraySwap(T & arr1, T & arr2) { std::swap(arr1.nx, arr2.nx); std::swap(arr1.ny, arr2.ny); std::swap(arr1.nz, arr2.nz); std::swap(arr1.pts, arr2.pts); std::swap(arr1.name, arr2.name); std::swap(arr1._array, arr2._array); } } #endif
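/*
  A minimal usage sketch (illustration only; the COSMO_ARRAY_USAGE_EXAMPLE
  guard and the example function are hypothetical, not part of the library).
  Trilinear interpolation is exact for a (multi)linear field, so the sample
  below should print 7.5.
*/
#ifdef COSMO_ARRAY_USAGE_EXAMPLE
#include <cstdio>

inline int cosmo_array_example()
{
  cosmo::CosmoArray<int, double> arr(8); // 8x8x8 grid, zero-initialized

  for(int i=0; i<8; ++i)
    for(int j=0; j<8; ++j)
      for(int k=0; k<8; ++k)
        arr(i, j, k) = i + j + k; // simple linear field

  // Out-of-range indexes wrap periodically via idx(); fractional
  // coordinates are handled by getInterpolatedValue().
  double v = arr.getInterpolatedValue(1.5, 2.5, 3.5);
  std::printf("interpolated = %f\n", v); // 7.5 for this field

  return 0;
}
#endif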
XSHA512_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2008,2011 by Solar Designer */ #if FMT_EXTERNS_H extern struct fmt_main fmt_XSHA512; #elif FMT_REGISTERS_H john_register_one(&fmt_XSHA512); #else #include "sha2.h" #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "simd-intrinsics.h" #include "rawSHA512_common.h" //#undef SIMD_COEF_64 #ifdef _OPENMP #include <omp.h> #ifdef SIMD_COEF_64 #ifndef OMP_SCALE #define OMP_SCALE 4096 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 8192 #endif #endif #endif #include "memdbg.h" #define FORMAT_LABEL "xsha512" #define FORMAT_NAME "Mac OS X 10.7" #define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME #define PLAINTEXT_LENGTH 107 #define SALT_SIZE 4 #define SALT_ALIGN sizeof(ARCH_WORD_32) #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #if ARCH_BITS >= 64 || defined(__SSE2__) /* 64-bitness happens to correlate with faster memcpy() */ #define PRECOMPUTE_CTX_FOR_SALT #else #undef PRECOMPUTE_CTX_FOR_SALT #endif #define BINARY_SIZE DIGEST_SIZE #ifdef SIMD_COEF_64 #define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 ) static ARCH_WORD_64 (*saved_key)[SHA_BUF_SIZ*MAX_KEYS_PER_CRYPT]; static ARCH_WORD_64 (*crypt_out); static int max_keys; #else static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); static ARCH_WORD_32 (*crypt_out)[16]; #ifdef PRECOMPUTE_CTX_FOR_SALT static SHA512_CTX ctx_salt; #else static ARCH_WORD_32 saved_salt; #endif #endif static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_64 #ifndef _OPENMP int omp_t = 1; #endif saved_key = mem_calloc_align(omp_t, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_out = mem_calloc_align(self->params.max_keys_per_crypt, 8 * sizeof(ARCH_WORD_64), MEM_ALIGN_SIMD); max_keys = self->params.max_keys_per_crypt; #else saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); #endif } static void done(void) { MEM_FREE(crypt_out); #ifndef SIMD_COEF_64 MEM_FREE(saved_len); #endif MEM_FREE(saved_key); } static void *get_salt(char *ciphertext) { static union { unsigned char c[SALT_SIZE]; ARCH_WORD_32 dummy; } buf; unsigned char *out = buf.c; char *p; int i; ciphertext += XSHA512_TAG_LENGTH; p = ciphertext; for (i = 0; i < sizeof(buf.c); i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #ifdef SIMD_COEF_64 #define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64) static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; } static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; } static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; } static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; } static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; } static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; } 
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { #ifndef SIMD_COEF_64 #ifdef PRECOMPUTE_CTX_FOR_SALT SHA512_Init(&ctx_salt); SHA512_Update(&ctx_salt, salt, SALT_SIZE); #else saved_salt = *(ARCH_WORD_32 *)salt; #endif #else int i; unsigned char *wucp = (unsigned char*)saved_key; for (i = 0; i < max_keys; ++i) { wucp[GETPOS(0, i)] = ((char*)salt)[0]; wucp[GETPOS(1, i)] = ((char*)salt)[1]; wucp[GETPOS(2, i)] = ((char*)salt)[2]; wucp[GETPOS(3, i)] = ((char*)salt)[3]; } #endif } static void set_key(char *key, int index) { #ifndef SIMD_COEF_64 int length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; saved_len[index] = length; memcpy(saved_key[index], key, length); #else ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64 *)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]; ARCH_WORD_64 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_64 temp; unsigned char *wucp = (unsigned char*)saved_key; // ok, first 4 bytes (if there are that many or more), we handle one offs. // this is because we already have 4 byte salt loaded into our saved_key. // IF there are more bytes of password, we drop into the multi loader. #if ARCH_ALLOWS_UNALIGNED const ARCH_WORD_64 *wkey = (ARCH_WORD_64*)&(key[4]); #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t)); const ARCH_WORD_64 *wkey = is_aligned(key + 4, sizeof(uint64_t)) ? 
(ARCH_WORD_64*)(key + 4) : (ARCH_WORD_64*)buf_aligned; if ((char *)wkey == buf_aligned && strlen(key) >= 4) strcpy(buf_aligned, key + 4); #endif len = 4; if (key[0] == 0) {wucp[GETPOS(4, index)] = 0x80; wucp[GETPOS(5, index)] = wucp[GETPOS(6, index)] = wucp[GETPOS(7, index)] = 0; goto key_cleaning; } wucp[GETPOS(4, index)] = key[0]; ++len; if (key[1] == 0) {wucp[GETPOS(5, index)] = 0x80; wucp[GETPOS(6, index)] = wucp[GETPOS(7, index)] = 0; goto key_cleaning; } wucp[GETPOS(5, index)] = key[1]; ++len; if (key[2] == 0) {wucp[GETPOS(6, index)] = 0x80; wucp[GETPOS(7, index)] = 0; goto key_cleaning; } wucp[GETPOS(6, index)] = key[2]; ++len; if (key[3] == 0) {wucp[GETPOS(7, index)] = 0x80; goto key_cleaning; } wucp[GETPOS(7, index)] = key[3]; ++len; keybuf_word += SIMD_COEF_64; while((unsigned char)(temp = *wkey++)) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16)); len+=2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24)); len+=3; goto key_cleaning; } if (!(temp & 0xff00000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32)); len+=4; goto key_cleaning; } if (!(temp & 0xff0000000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40)); len+=5; goto key_cleaning; } if (!(temp & 0xff000000000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48)); len+=6; goto key_cleaning; } if (!(temp & 0xff00000000000000ULL)) { *keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56)); len+=7; goto key_cleaning; } *keybuf_word = JOHNSWAP64(temp); len += 8; keybuf_word += SIMD_COEF_64; } *keybuf_word = 0x8000000000000000ULL; key_cleaning: keybuf_word += SIMD_COEF_64; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_64; } keybuffer[15*SIMD_COEF_64] = len << 3; #endif } static char *get_key(int index) { #ifndef SIMD_COEF_64 saved_key[index][saved_len[index]] = 0; return saved_key[index]; #else static unsigned char key[PLAINTEXT_LENGTH+1]; int i; unsigned char *wucp = (unsigned char*)saved_key; ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]; int len = (keybuffer[15*SIMD_COEF_64] >> 3) - SALT_SIZE; for (i = 0; i < len; ++i) key[i] = wucp[GETPOS(SALT_SIZE + i, index)]; key[i] = 0; return (char*)key; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP #ifndef SIMD_COEF_64 #ifdef PRECOMPUTE_CTX_FOR_SALT #pragma omp parallel for default(none) private(index) shared(ctx_salt, saved_key, saved_len, crypt_out) #else #pragma omp parallel for default(none) private(index) shared(saved_salt, saved_key, saved_len, crypt_out) #endif #else #pragma omp parallel for #endif #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SIMD_COEF_64 SIMDSHA512body(&saved_key[index/MAX_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN); #else SHA512_CTX ctx; #ifdef PRECOMPUTE_CTX_FOR_SALT memcpy(&ctx, &ctx_salt, sizeof(ctx)); #else SHA512_Init(&ctx); SHA512_Update(&ctx, &saved_salt, SALT_SIZE); #endif SHA512_Update(&ctx, saved_key[index], saved_len[index]); SHA512_Final((unsigned char *)(crypt_out[index]), &ctx); #endif } return count; } static int cmp_all(void *binary, int count) { unsigned int index; for (index = 0; index < count; index++) #ifdef 
SIMD_COEF_64 if (((ARCH_WORD_64 *) binary)[0] == crypt_out[HASH_IDX]) #else if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] ) #endif return 1; return 0; } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_64 int i; for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_64); i++) if (((ARCH_WORD_64*) binary)[i] != crypt_out[HASH_IDX + i*SIMD_COEF_64]) return 0; return 1; #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_XSHA512 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, XSHA512_BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, sha512_common_tests_xsha512 }, { init, done, fmt_default_reset, sha512_common_prepare_xsha512, sha512_common_valid_xsha512, sha512_common_split_xsha512, sha512_common_binary_xsha512, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
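/*
 * Illustrative sketch (not part of the format): a stand-alone model of the
 * GETPOS() mapping above, with SIMD_COEF_64 and SHA_BUF_SIZ replaced by
 * small assumed constants (the real values are build-dependent).  Byte i of
 * key `index` lands big-endian inside the 64-bit lanes of the interleaved
 * buffer that SIMDSHA512body() consumes, SIMD_COEF_64 keys interleaved
 * word by word.
 */
#if 0 /* layout illustration only */
#include <stdio.h>

#define MODEL_SIMD_COEF_64 2  /* assumed for the model */
#define MODEL_SHA_BUF_SIZ 16

static size_t getpos_model(size_t i, size_t index)
{
	/* (i & ~7) matches the (i & (0xffffffff-7)) masking in GETPOS() */
	return (index & (MODEL_SIMD_COEF_64 - 1)) * 8 +
	    (i & ~(size_t)7) * MODEL_SIMD_COEF_64 +
	    (7 - (i & 7)) +
	    index / MODEL_SIMD_COEF_64 *
	    MODEL_SHA_BUF_SIZ * MODEL_SIMD_COEF_64 * 8;
}

int main(void)
{
	/* byte 0 of key 0 -> 7 (big-endian top of lane 0), byte 0 of key 1
	   -> 15 (top of lane 1), byte 8 of key 0 -> 23 (next word, lane 0) */
	printf("%zu %zu %zu\n", getpos_model(0, 0), getpos_model(0, 1),
	    getpos_model(8, 0));
	return 0;
}
#endif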
tinyexr.h
/* Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// ///////////////////////////////////////////////////////////////////////////

// End of OpenEXR license -------------------------------------------------

#ifndef TINYEXR_H_
#define TINYEXR_H_

//
//
// Do this:
//    #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
//
// i.e. it should look like this:
//    #include ...
//    #include ...
//    #include ...
//    #define TINYEXR_IMPLEMENTATION
//    #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Whether to use the embedded miniz to decode ZIP-format pixel data.
// Linking with zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-10)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-11)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.
  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by inspecting the header only)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
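//
// A minimal call sketch (`rgba` here is a hypothetical buffer of
// width*height*4 floats; `err` follows the FreeEXRErrorMessage() protocol
// documented below):
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4 /* RGBA */,
//                     1 /* save_as_fp16 */, "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     fprintf(stderr, "SaveEXR: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }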
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename, const char **err);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Frees error message
extern void FreeEXRErrorMessage(const char *msg);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);

// Loads single-part OpenEXR image from a file.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from memory.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromMemory` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from memory.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using the EXRHeader.compression_type value (the
// EXRImage struct itself carries no compression field).
// Returns the number of bytes on success.
// Returns zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
//                                 char *filename,
//                                 const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
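//
// A minimal call sketch (`memory`/`size` here stand for a hypothetical
// in-memory EXR file; the RGBA buffer handed back through `out_rgba` is
// owned by the caller):
//
//   float *rgba = NULL; int w = 0, h = 0; const char *err = NULL;
//   int ret = LoadEXRFromMemory(&rgba, &w, &h, memory, size, &err);
//   if (ret == TINYEXR_SUCCESS) {
//     // pixel (x, y), channel c: rgba[4 * (y * w + x) + c]
//     free(rgba);
//   } else {
//     FreeEXRErrorMessage(err);
//   }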
#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>

// #include <iostream> // debug

#include <limits>
#include <string>
#include <vector>

#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif  // __cplusplus > 199711L

#ifdef _OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
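// Editor's note: a minimal sketch (not from the original) of the zlib swap
// described above; availability of `zlib.h` is an assumption of the build.
#if 0
#define TINYEXR_USE_MINIZ 0
#include <zlib.h>  // user-provided zlib-compatible API (see Issue #46)
#define TINYEXR_IMPLEMENTATION
#include "tinyexr.h"
#endif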
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

#if TINYEXR_USE_MINIZ

namespace miniz {

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"

#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif

#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif

#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif

#endif

/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
   reading/writing/appending, PNG writing
   See "unlicense" statement at the end of this file.
   Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
   http://www.ietf.org/rfc/rfc1951.txt

   Most API's defined in miniz.c are optional. For example, to disable the
   archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid
   of all stdio usage define MINIZ_NO_STDIO (see the list below for more
   macros).

   * Change History
     10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
     release with Zip64 support (almost there!):
     - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
       (thanks kahmyong.moon@hp.com) which could cause file-locate operations
       to not find files. This bug would only have occurred in earlier
       versions if you explicitly used this flag, OR if you used
       mz_zip_extract_archive_file_to_heap() or
       mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If
       you can't switch to v1.15 but want to fix this bug, just remove the
       uses of this flag from both helper funcs (and of course don't use the
       flag).
     - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
       pUser_read_buf is not NULL and compressed size is > uncompressed size
     - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
       compressed data from directory entries, to account for weird zipfiles
       which contain zero-size compressed data on dir entries. Hopefully this
       fix won't cause any issues on weird zip archives, because it assumes
       the low 16-bits of zip external attributes are DOS attributes (which I
       believe they always are in practice).
     - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
       internal attributes, just the filename and external attributes
     - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
     - Added cmake support for Linux builds which builds all the examples,
       tested with clang v3.3 and gcc v4.6.
     - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
     - Merged MZ_FORCEINLINE fix from hdeanclark
     - Fix <time.h> include before config #ifdef, thanks emil.brink
     - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
       (super useful for OpenGL apps), and explicit control over the
       compression level (so you can set it to 1 for real-time compression).
     - Merged in some compiler fixes from paulharris's github repo.
     - Retested this build under Windows (VS 2010, including static analysis),
       tcc 0.9.26, gcc v4.6 and clang v3.3.
     - Added example6.c, which dumps an image of the mandelbrot set to a PNG
       file.
     - Modified example2 to help test the
       MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
     - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
       possible src file fclose() leak if alignment bytes+local header file
       write failed
     - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing
       the wrong central dir header offset, appears harmless in this release,
       but it became a problem in the zip64 branch
     5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
     #include <time.h> (thanks fermtect).
     5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
     mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is
     64-bit.
     - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
       re-ran a randomized regression test on ~500k files.
     - Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
     - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
       (static analysis) option and fixed all warnings (except for the silly
       "Use of the comma-operator in a tested expression.." analysis warning,
       which I purposely use to work around a MSVC compiler warning).
     - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
       tested Linux executables. The codeblocks workspace is compatible with
       Linux+Win32/x64.
     - Added miniz_tester solution/project, which is a useful little app
       derived from LZHAM's tester app that I use as part of the regression
       test.
     - Ran miniz.c and tinfl.c through another series of regression testing on
       ~500,000 files and archives.
     - Modified example5.c so it purposely disables a bunch of high-level
       functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
       MINIZ_NO_STDIO bug report.)
     - Fix ftell() usage in examples so they exit with an error on files
       which are too large (a limitation of the examples, not miniz itself).
     4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
     minor level_and_flags issues in the archive API's. level_and_flags can
     now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson
     <bruced@valvesoftware.com> for the feedback/bug report.
     5/28/11 v1.11 - Added statement from unlicense.org
     5/27/11 v1.10 - Substantial compressor optimizations:
     - Level 1 is now ~4x faster than before. The L1 compressor's throughput
       now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
       depending on the type of data, and x64 vs. x86).
     - Improved baseline L2-L9 compression perf. Also, greatly improved
       compression perf. issues on some file types.
     - Refactored the compression code for better readability and
       maintainability.
     - Added level 10 compression level (L10 has slightly better ratio than
       level 9, but could have a potentially large drop in throughput on some
       files).
     5/15/11 v1.09 - Initial stable release.

   * Low-level Deflate/Inflate implementation notes:

     Compression: Use the "tdefl" API's. The compressor supports raw, static,
     and dynamic blocks, lazy or greedy parsing, match length filtering,
     RLE-only, and Huffman-only streams. It performs and compresses
     approximately as well as zlib.

     Decompression: Use the "tinfl" API's. The entire decompressor is
     implemented as a single function coroutine: see tinfl_decompress(). It
     supports decompression into a 32KB (or larger power of 2) wrapping
     buffer, or into a memory block large enough to hold the entire file.

     The low-level tdefl/tinfl API's do not make any use of dynamic memory
     allocation.

   * zlib-style API notes:

     miniz.c implements a fairly large subset of zlib. There's enough
     functionality present for it to be a drop-in zlib replacement in many
     apps:
        The z_stream struct, optional memory allocation callbacks
        deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
        inflateInit/inflateInit2/inflate/inflateEnd
        compress, compress2, compressBound, uncompress
        CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
        routines.
        Supports raw deflate streams or standard zlib streams with adler-32
        checking.

     Limitations:
        The callback API's are not implemented yet. No support for gzip
        headers or zlib static dictionaries. I've tried to closely emulate
        zlib's various flavors of stream flushing and return status codes, but
        there are no guarantees that miniz.c pulls this off perfectly.
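     [Editor's note, not part of the original miniz notes] A minimal
     single-call round trip with the zlib-subset functions declared below;
     buffer sizes here are illustrative:

        unsigned char src[64] = "hello hello hello hello";
        unsigned char comp[256], back[64];
        mz_ulong clen = sizeof(comp);  // in: capacity, out: compressed size
        mz_ulong ulen = sizeof(back);  // in: capacity, out: original size
        mz_compress(comp, &clen, src, sizeof(src));  // returns MZ_OK
        mz_uncompress(back, &ulen, comp, clen);      // returns MZ_OK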
   * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
     originally written by Alex Evans. Supports 1-4 bytes/pixel images.

   * ZIP archive API notes:

     The ZIP archive API's were designed with simplicity and efficiency in
     mind, with just enough abstraction to get the job done with minimal fuss.
     There are simple API's to retrieve file information, read files from
     existing archives, create new archives, append new files to existing
     archives, or clone archive data from one archive to another. It supports
     archives located in memory or the heap, on disk (using stdio.h), or you
     can specify custom file read/write callbacks.

     - Archive reading: Just call this function to read a single file from a
       disk archive:

        void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                                  const char *pArchive_name,
                                                  size_t *pSize,
                                                  mz_uint zip_flags);

       For more complex cases, use the "mz_zip_reader" functions. Upon opening
       an archive, the entire central directory is located and read as-is into
       memory, and subsequent file access only occurs when reading individual
       files.

     - Archive file scanning: The simple way is to use this function to scan a
       loaded archive for a specific file:

        int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                                      const char *pComment, mz_uint flags);

       The locate operation can optionally check file comments too, which (as
       one example) can be used to identify multiple versions of the same file
       in an archive. This function uses a simple linear search through the
       central directory, so it's not very fast.

       Alternately, you can iterate through all the files in an archive (using
       mz_zip_reader_get_num_files()) and retrieve detailed info on each file
       by calling mz_zip_reader_file_stat().

     - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
       immediately writes compressed file data to disk and builds an exact
       image of the central directory in memory. The central directory image
       is written all at once at the end of the archive file when the archive
       is finalized.

       The archive writer can optionally align each file's local header and
       file data to any power of 2 alignment, which can be useful when the
       archive will be read from optical media. Also, the writer supports
       placing arbitrary data blobs at the very beginning of ZIP archives.
       Archives written using either feature are still readable by any ZIP
       tool.

     - Archive appending: The simple way to add a single file to an archive is
       to call this function:

        mz_bool mz_zip_add_mem_to_archive_file_in_place(
            const char *pZip_filename, const char *pArchive_name,
            const void *pBuf, size_t buf_size, const void *pComment,
            mz_uint16 comment_size, mz_uint level_and_flags);

       The archive will be created if it doesn't already exist, otherwise
       it'll be appended to. Note the appending is done in-place and is not an
       atomic operation, so if something goes wrong during the operation it's
       possible the archive could be left without a central directory
       (although the local file headers and file data will be fine, so the
       archive will be recoverable).

       For more complex archive modification scenarios:
       1. The safest way is to use a mz_zip_reader to read the existing
          archive, cloning only those bits you want to preserve into a new
          archive using the mz_zip_writer_add_from_zip_reader() function
          (which copies the compressed file data as-is). When you're done,
          delete the old archive and rename the newly written archive, and
          you're done. This is safe but requires a bunch of temporary disk
          space or heap memory.
       2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer
          using mz_zip_writer_init_from_reader(), append new files as needed,
          then finalize the archive which will write an updated central
          directory to the original archive. (This is basically what
          mz_zip_add_mem_to_archive_file_in_place() does.) There's a
          possibility that the archive's central directory could be lost with
          this method if anything goes wrong, though.

     - ZIP archive support limitations: No zip64 or spanning support.
       Extraction functions can only handle unencrypted, stored or deflated
       files. Requires streams capable of seeking.

   * This is a header file library, like stb_image.c. To get only a header
     file, either cut and paste the below header, or create miniz.h, #define
     MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.
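     [Editor's note, not part of the original miniz notes] A sketch of the
     in-place append helper described above. Note that tinyexr defines
     MINIZ_NO_ARCHIVE_APIS below, so this applies to standalone miniz.c only;
     "archive.zip" and the member name are illustrative:

        const char *msg = "sample data";
        mz_zip_add_mem_to_archive_file_in_place(
            "archive.zip", "notes/sample.txt", msg, strlen(msg),
            NULL, 0, (mz_uint)MZ_DEFAULT_COMPRESSION);  // returns MZ_TRUE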
   * Important: For best perf. be sure to customize the below macros for your
     target platform:
       #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
       #define MINIZ_LITTLE_ENDIAN 1
       #define MINIZ_HAS_64BIT_REGISTERS 1

   * On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1"
     before including miniz.c to ensure miniz uses the 64-bit variants:
     fopen64(), stat64(), etc. Otherwise you won't be able to process large
     files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/

#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be
// able to get the current time, or get/set file times, and the C run-time
// funcs that get/set times won't be called.
// The current downside is the times written to your archives will be from
// 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc callbacks to the zlib and archive API's, and a few
// stand-alone helper API's which don't provide custom user functions (such as
// tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't
// work.
//#define MINIZ_NO_MALLOC

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) ||   \
    defined(__i386) || defined(__i486__) || defined(__i486) ||    \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif

#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
  0  // disabled to suppress compiler warnings
#endif
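// Editor's note: a sketch (not from the original) of the pre-include
// configuration that the notes above ask for; values shown are illustrative
// and would normally appear in the translation unit that pulls in this
// header, not here.
#if 0
#define _LARGEFILE64_SOURCE 1                   // glibc: use fopen64()/stat64()
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1  // only on CPUs that allow it
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
// ... then include miniz.c (or the header that embeds it).
#endif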
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
    defined(_LP64) || defined(__LP64__) || defined(__ia64__) ||   \
    defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API Definitions.

// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;

// mz_free() internally uses the MZ_FREE() macro (which by default calls
// free() unless you've modified the MZ_MALLOC macro) to release a block
// allocated from the heap.
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH.
// The other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,
  MZ_BEST_SPEED = 1,
  MZ_BEST_COMPRESSION = 9,
  MZ_UBER_COMPRESSION = 10,
  MZ_DEFAULT_LEVEL = 6,
  MZ_DEFAULT_COMPRESSION = -1
};

// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15

struct mz_internal_state;

// Compression/decompression stream struct.
typedef struct mz_stream_s {
  const unsigned char *next_in;  // pointer to next byte to read
  unsigned int avail_in;         // number of bytes available at next_in
  mz_ulong total_in;             // total number of bytes consumed so far

  unsigned char *next_out;  // pointer to next byte to write
  unsigned int avail_out;   // number of bytes that can be written to next_out
  mz_ulong total_out;       // total number of bytes produced so far

  char *msg;                        // error msg (unused)
  struct mz_internal_state *state;  // internal state, allocated by zalloc/zfree

  mz_alloc_func zalloc;  // optional heap allocation function (defaults to malloc)
  mz_free_func zfree;    // optional heap free function (defaults to free)
  void *opaque;          // heap alloc function user pointer

  int data_type;      // data_type (unused)
  mz_ulong adler;     // adler32 of the source or uncompressed data
  mz_ulong reserved;  // not used
} mz_stream;

typedef mz_stream *mz_streamp;
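// Editor's usage sketch (not from the original): both checksum helpers above
// are incremental; seed with the *_INIT constant and fold in data in chunks.
#if 0
static mz_ulong crc_of_two_chunks(const unsigned char *a, size_t na,
                                  const unsigned char *b, size_t nb) {
  mz_ulong crc = MZ_CRC32_INIT;  // equivalently: mz_crc32(0, NULL, 0)
  crc = mz_crc32(crc, a, na);
  crc = mz_crc32(crc, b, nb);
  return crc;
}
#endif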
// Returns the version string of miniz.c.
const char *mz_version(void);

// mz_deflateInit() initializes a compressor with default options:
// Parameters:
//  pStream must point to an initialized mz_stream struct.
//  level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
//  level 1 enables a specially optimized compression function that's been
//  optimized purely for performance, not ratio.
//  (This special func. is currently only enabled when
//  MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
//  MZ_PARAM_ERROR if the input parameters are bogus.
//  MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);

// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
//   method must be MZ_DEFLATED
//   window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream
//   with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw
//   deflate/no header or footer)
//   mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy);

// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);

// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must
//   initialize/update the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH,
//   or MZ_FINISH.
// Return values:
//   MZ_OK on success (when flushing, or if more input is needed but not
//   available, and/or there's more output to be written but the output
//   buffer is full).
//   MZ_STREAM_END if all input has been consumed and all output bytes have
//   been written. Don't call mz_deflate() on the stream anymore.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input and/or
//   output buffers are empty. (Fill up the input buffer or free up some
//   output space and try again.)
int mz_deflate(mz_streamp pStream, int flush);

// mz_deflateEnd() deinitializes a compressor:
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);

// mz_deflateBound() returns a (very) conservative upper bound on the amount
// of data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);

// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level);

// mz_compressBound() returns a (very) conservative upper bound on the amount
// of data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
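// Editor's usage sketch (not from the original): a single-shot deflate using
// the stream API documented above; needs <string.h> for memset().
#if 0
static int deflate_example(const unsigned char *src, size_t src_len,
                           unsigned char *out, size_t out_cap,
                           size_t *out_len) {
  mz_stream s;
  int status;
  memset(&s, 0, sizeof(s));
  if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) != MZ_OK) return -1;
  s.next_in = src;
  s.avail_in = (unsigned int)src_len;
  s.next_out = out;
  s.avail_out = (unsigned int)out_cap;
  // With MZ_FINISH and fully-sized buffers this completes in one call; use a
  // loop with MZ_NO_FLUSH to feed input chunk by chunk instead.
  status = mz_deflate(&s, MZ_FINISH);
  *out_len = (size_t)s.total_out;
  mz_deflateEnd(&s);
  return (status == MZ_STREAM_END) ? 0 : -1;
}
#endif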
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);

// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped
// with a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);

// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must
//   initialize/update the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
//   On the first call, if flush is MZ_FINISH it's assumed the input and
//   output buffers are both sized large enough to decompress the entire
//   stream in a single call (this is slightly faster).
//   MZ_FINISH implies that there are no more source bytes available beside
//   what's already in the input buffer, and that the output buffer is large
//   enough to hold the rest of the decompressed data.
// Return values:
//   MZ_OK on success. Either more input is needed but not available, and/or
//   there's more output to be written but the output buffer is full.
//   MZ_STREAM_END if all needed input has been consumed and all output bytes
//   have been written. For zlib streams, the adler-32 of the decompressed
//   data has also been verified.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_DATA_ERROR if the deflate stream is invalid.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input buffer
//   is empty but the inflater needs more input to continue, or if the output
//   buffer is not large enough. Call mz_inflate() again with more input data,
//   or with more room in the output buffer (except when using single call
//   decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);

// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);

// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len);

// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);

// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
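// Editor's usage sketch (not from the original): the inverse of the deflate
// sketch above, decompressing a zlib-wrapped buffer in single-call mode.
#if 0
static int inflate_example(const unsigned char *comp, size_t comp_len,
                           unsigned char *out, size_t out_cap) {
  mz_stream s;
  int status;
  memset(&s, 0, sizeof(s));  // needs <string.h>
  if (mz_inflateInit(&s) != MZ_OK) return -1;
  s.next_in = comp;
  s.avail_in = (unsigned int)comp_len;
  s.next_out = out;
  s.avail_out = (unsigned int)out_cap;
  status = mz_inflate(&s, MZ_FINISH);  // single-call mode, see notes above
  mz_inflateEnd(&s);
  return (status == MZ_STREAM_END) ? 0 : -1;
}
#endif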
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. 
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

// ------------------- ZIP archive reading/writing

#ifndef MINIZ_NO_ARCHIVE_APIS

enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};

typedef struct {
  mz_uint32 m_file_index;
  mz_uint32 m_central_dir_ofs;
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;
  mz_uint64 m_comp_size;
  mz_uint64 m_uncomp_size;
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;

  mz_uint m_file_offset_alignment;

  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;

  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;

  mz_zip_internal_state *m_pState;
} mz_zip_archive;

typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;

// ZIP archive reading

// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif

// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);

// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);

// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size
// is 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input
// archive file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless
// you've overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. 
If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. 
// *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. 
// enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos,
      m_bits_in, m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output data will be supplied to this
// callback. In this case, the user should call the tdefl_compress_buffer()
// API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may
// be much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif  // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif  // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only
// want the header, define MINIZ_HEADER_FILE_ONLY.)
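// Editor's usage sketch (not from the original): the callback-driven tdefl
// path described above; the callback receives each compressed chunk. Needs
// <stdio.h> and <stdlib.h>.
#if 0
static mz_bool my_sink(const void *pBuf, int len, void *pUser) {
  (void)pUser;
  return fwrite(pBuf, 1, (size_t)len, stdout) == (size_t)len;
}

static int tdefl_callback_example(const void *src, size_t n) {
  // tdefl_compressor is large, so heap-allocate it rather than putting it on
  // the stack.
  tdefl_compressor *d = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
  int rc = -1;
  if (!d) return -1;
  if (tdefl_init(d, my_sink, NULL,
                 TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) ==
          TDEFL_STATUS_OKAY &&
      tdefl_compress_buffer(d, src, n, TDEFL_FINISH) == TDEFL_STATUS_DONE) {
    rc = 0;
  }
  free(d);
  return rc;
}
#endif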
#ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
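  // (Past this point we are on the streaming path: tinfl_decompress() always
  //  writes into the 32KB circular dictionary pState->m_dict, and completed
  //  bytes are then copied out to the caller's next_out buffer, at most
  //  TINFL_LZ_DICT_SIZE bytes per pass through the loop below.)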
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
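// Decode-table layout shared by the two macros below: m_look_up[] is indexed
// by the low TINFL_FAST_LOOKUP_BITS bits of the bit buffer. A non-negative
// entry packs both results of a short code,
//   code_len = entry >> 9;   sym = entry & 511;
// while a negative entry is a (bitwise-complemented) index into m_tree[],
// which is walked one bit at a time for codes longer than
// TINFL_FAST_LOOKUP_BITS.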
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
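// tdefl_huffman_enforce_max_code_size() below is the standard fix-up for
// length-limited canonical Huffman tables: with L = max_code_size it tracks
// the Kraft sum
//   total = sum_i num_codes[i] * 2^(L - i)
// and, while total exceeds 2^L, drops one code from depth L and turns one
// code at some depth i < L into two codes at depth i + 1 (num_codes[i]--,
// num_codes[i+1] += 2), each iteration lowering the sum by exactly one.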
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
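  // These are the fixed code lengths from RFC 1951, section 3.2.6:
  // literal/length symbols 0-143 use 8 bits, 144-255 use 9, 256-279 use 7,
  // 280-287 use 8, and all 32 distance symbols use 5 bits. The two calls
  // below (with static_table == MZ_TRUE) just derive the canonical codes
  // from these fixed lengths.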
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
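  // (When that happens, the block is simply re-emitted below with the fixed
  //  RFC 1951 code tables instead of the dynamic ones.)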
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
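  // It hashes only the next three bytes, makes a single probe into d->m_hash
  // (no chain walking), takes the first match whose leading trigram agrees,
  // and otherwise emits a literal, trading ratio for raw speed.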
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
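  // (The resets below also clear the per-call I/O bookkeeping, so a
  // tdefl_compressor object can be reused for a fresh stream immediately
  // after tdefl_init() without any extra zeroing by the caller.)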
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded &&
              (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
               TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16,  32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
                           0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                           0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,
                           0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,
                           8,    chans[num_chans], 0, 0, 0, 0, 0, 0, 0,
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,
                           0x49, 0x44, 0x41, 0x54};
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}

void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
  // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}

// ------------------- .ZIP archive reading

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>

#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif
defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, 
mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
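  // (A possible mitigation, stated as an assumption about the build rather
  //  than a guarantee: compile the translation unit with
  //  -D_LARGEFILE64_SOURCE so the stat64-based MZ_FILE_STAT mapping above is
  //  selected.)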
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
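// For context, a typical read sequence that benefits from this sorted index
// (an illustrative sketch only; "archive.zip" and "dir/file.txt" are
// placeholder names):
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     int idx = mz_zip_reader_locate_file(&zip, "dir/file.txt", NULL, 0);
//     if (idx >= 0) {
//       size_t size;
//       void *p = mz_zip_reader_extract_to_heap(&zip, (mz_uint)idx, &size, 0);
//       if (p) {
//         // ... use the extracted bytes ...
//         zip.m_pFree(zip.m_pAlloc_opaque, p);
//       }
//     }
//     mz_zip_reader_end(&zip);
//   }
//
// The index built below feeds mz_zip_reader_locate_file_binary_search(),
// which is used whenever the archive was opened without
// MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY and the lookup passes no comment
// and neither MZ_ZIP_FLAG_IGNORE_PATH nor MZ_ZIP_FLAG_CASE_SENSITIVE.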
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
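  // (Record layout recap, per the MZ_ZIP_ECDH_* offsets defined above:
  //  4-byte signature, disk numbers at +4/+6, entry counts at +8/+10,
  //  central directory size at +12 and offset at +16, comment length at +20,
  //  for a minimum record size of 22 bytes.)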
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
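  // (zip64 entries are recognized below purely by the 0xFFFFFFFF sentinel in
  //  the 32-bit compressed/uncompressed size fields; any record carrying that
  //  sentinel is rejected.)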
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
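  // (0x10 is the DOS/Windows FILE_ATTRIBUTE_DIRECTORY bit, which most zip
  //  writers store in the low 16 bits of the external attributes field.)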
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
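    // Two sub-cases below: an archive already resident in memory is handed to
    // the callback in a single call, while a file-backed archive is streamed
    // through the read buffer in chunks of at most MZ_ZIP_MAX_IO_BUF_SIZE
    // bytes.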
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
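// out_buf_ofs counts every byte handed to the callback, and file_crc32 was
// accumulated chunk by chunk above, so checking both against the central
// directory values catches truncated as well as corrupted streams.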
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
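// A power of two has exactly one bit set, so x & (x - 1) clears it and
// yields 0 (e.g. 8 & 7 == 0, while 6 & 5 == 4); any nonzero result here
// means the requested alignment is invalid.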
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the supported max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
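// Shrinking back to orig_central_dir_size discards any partially appended
// header/filename/extra/comment bytes, leaving the in-memory central
// directory exactly as it was before this entry was attempted.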
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
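// The archive is first opened with the reader (skipping the central
// directory sort, which a writer doesn't need), then converted in place to
// a writer via mz_zip_writer_init_from_reader() so new entries are written
// starting at the old central directory offset.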
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef _MSC_VER #pragma warning(pop) #endif } // namespace miniz #else // Reuse MINIZ_LITTLE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ #pragma clang diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
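// Illustrative note (not in the original gist): FP32/FP16 below overlay an
// IEEE-754 bit pattern with sign/exponent/mantissa bit fields so the
// conversion routines can manipulate the fields directly. Worked example:
// half 1.0 is 0x3C00 (sign 0, exponent 15, mantissa 0); half_to_float()
// rebiases the exponent by (127 - 15) and yields 0x3F800000, which is 1.0f.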
union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the halfp int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = static_cast<unsigned int>(newexp); o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RANDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif static const char *ReadString(std::string *s, const char *ptr, size_t len) { // Read until NULL(\0). const char *p = ptr; const char *q = ptr; while ((size_t(q - ptr) < len) && (*q) != 0) { q++; } if (size_t(q - ptr) >= len) { (*s) = std::string(); return NULL; } (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character.
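// strnlen() scanned all remaining header bytes without finding '\0', so the
// attribute name (on-disk layout: name\0, type\0, uint32 size, data) runs
// off the end of the buffer.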
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
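// ReadString() returned NULL with an empty name, i.e. the channel list
// ended without its terminating 0 byte; each entry is name\0 followed by
// 16 fixed bytes (int pixel_type, uchar p_linear + 3 reserved bytes,
// int x_sampling, int y_sampling), as parsed below.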
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). 
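// CompressZip() stores the raw bytes whenever deflate would have grown the
// data, so compressed size == uncompressed size is the agreed-upon signal
// for a plain copy here.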
memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (miniz::MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. // static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressible run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressible run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the uncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength. // static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; // Fixes #116: Add bounds check to in buffer.
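// A negative code byte means a literal run: copy -(*in) verbatim bytes.
// (A non-negative code n in the else-branch below means the next byte
// repeats n + 1 times.) Both checks keep count within the output and
// input buffers.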
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return true; } // Workaround for issue #112. // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`. if (src_size <= 2) { return false; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); if (ret != static_cast<int>(uncompressed_size)) { return false; } // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. 
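// (the compressor stored the scanline as two planar halves; e.g. six bytes
// A0 A1 A2 | B0 B1 B2 are re-interleaved below as A0 B0 A1 B1 A2 B2.)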
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
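//
// Illustrative round trip for the mod-2^16 variant defined below:
// wenc16(3, 1) yields l = ((0x8003 + 1) >> 1) = 0x4002 and
// h = (0x8003 - 1) & 0xffff = 0x8002; wdec16(0x4002, 0x8002) recovers
// b = (0x4002 - 0x4001) & 0xffff = 1 and a = (0x8002 + 1 - 0x8000) & 0xffff
// = 3.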
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierachical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
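//
// With HUF_ENCBITS = 16 the encoding table has one entry per 16-bit symbol
// plus one run-length pseudo-symbol (hence HUF_ENCSIZE = 65537). Decoding
// indexes a primary table with the top HUF_DECBITS = 14 bits of the bit
// buffer; codes longer than 14 bits spill into per-entry candidate lists
// (HufDec::p) that are scanned linearly.
//
#if 0
// Illustrative sketch of the primary-table step (mirrors hufDecode() below;
// `c` is the bit buffer, `lc` its fill level):
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
  // short code: symbol is pl.lit, consume pl.len bits
} else {
  // long code: try candidates pl.p[0] .. pl.p[pl.lit - 1]
}
#endif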
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
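// (hlink then splices the list starting at scode[mm] onto the end of the
// list starting at scode[m], so every later bit added to the merged node
// reaches the descendants of both.)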
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
// static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller) // decoding table [HUF_DECSIZE] { for (int i = 0; i < HUF_DECSIZE; i++) { hdecod[i].len = 0; hdecod[i].lit = 0; hdecod[i].p = NULL; } // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE); } // // Build a decoding hash table based on the encoding table hcode: // - short codes (<= HUF_DECBITS) are resolved with a single table access; // - long code entry allocations are not optimized, because long codes are // unfrequent; // - decoding tables are used by hufDecode(); // static bool hufBuildDecTable(const long long *hcode, // i : encoding table int im, // i : min index in hcode int iM, // i : max index in hcode HufDec *hdecod) // o: (allocated by caller) // decoding table [HUF_DECSIZE] { // // Init hashtable & loop on all codes. // Assumes that hufClearDecTable(hdecod) has already been called. // for (; im <= iM; im++) { long long c = hufCode(hcode[im]); int l = hufLength(hcode[im]); if (c >> l) { // // Error: c is supposed to be an l-bit code, // but c contains a value that is greater // than the largest l-bit number. // // invalidTableEntry(); return false; } if (l > HUF_DECBITS) { // // Long code: add a secondary entry // HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) { // // Error: a short code has already // been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { int *p = pl->p; pl->p = new int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCount. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number. 
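// (e.g. with a 10-bit symbol code and a 6-bit run code, five consecutive
// instances (runCount == 4) cost 10 + 6 + 8 = 24 bits in run form versus
// 5 * 10 = 50 bits written out explicitly.)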
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. 
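// The run below replicates out[-1], so at least one symbol must already
// have been decoded: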
if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); 
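// (fixed 20-byte header: [0]=im, [4]=iM, [8]=tableLength, [12]=nBits,
// [16]=reserved, each stored as a little-endian 32-bit value.)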
writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) //{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. 
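// (same fallback as the ZIP and RLE paths; DecompressPiz() detects it by
// comparing the compressed and uncompressed sizes.)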
// (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) 
{
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }

  if (!foundType) {
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Store into `precision` (not `rate`), matching how the value is
        // consumed by zfp_stream_set_precision() below.
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size = dst_width * dst_num_lines * num_channels;

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    // Done; match the passthrough behavior of the other Decompress* paths.
    return true;
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines,
                 int num_channels, const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
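// zfp_field_2d() views the planar float buffer as a single 2D field of
// width x (num_lines * num_channels) values, so each channel's scanlines
// are encoded contiguously.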
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
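// (channel offsets within outBuf are byte-granular, so line_ptr need not be
// 2-byte aligned; cpy2() copies byte-wise instead of doing a direct load.)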
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
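// (width * num_lines * pixel_data_size bytes; rleUncompress() rejects any
// block that would expand past this, so dstLen also bounds the decoder.)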
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = 
reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { 
            outLine += (size_t(height) - 1 - (size_t(y) + v)) *
                       size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            tinyexr::FP16 hf;

            // address may not be aligned. use byte-wise copy for safety. #76
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            tinyexr::FP32 f32 = half_to_float(hf);

            outLine[u] = f32.f;
          }
        } else {
          assert(0);
          return false;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        const float *line_ptr = reinterpret_cast<const float *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        float *outLine = reinterpret_cast<float *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine +=
              (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
            (data_ptr + data_len)) {
          // Insufficient data size
          return false;
        }

        for (int u = 0; u < width; u++) {
          float val;
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          outLine[u] = val;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        unsigned int *outLine =
            reinterpret_cast<unsigned int *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine +=
              (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        for (int u = 0; u < width; u++) {
          if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
              (data_ptr + data_len)) {
            // Corrupted data?
            return false;
          }

          unsigned int val;
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          outLine[u] = val;
        }
      }
    }
  }

  return true;
}

static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
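  // Worked example of the clamping above (illustrative numbers, not from any
  // particular file): with data_width = 257 and tile_size_x = 32, tiles 0..7
  // decode at the full 32 pixels, while the last tile (tile_offset_x = 8)
  // covers only 257 - 8 * 32 = 1 pixel.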
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
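      // (In a multipart file the per-part headers are stored back to back,
      // and the list is terminated by an empty header, i.e. a single null
      // byte, which is what the check above detects.)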
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return 
TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." 
<< std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
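    // (strncpy() does not write a terminating '\0' when the source string is
    // 255 characters or longer, so the explicit terminator below is
    // required.)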
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    // Iterate over the (possibly clamped) attribute count so the copy can
    // never run past the allocation above.
    for (size_t i = 0; i < size_t(exr_header->num_custom_attributes); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }
  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      std::stringstream ss;
      ss << "Invalid data width or data height: " << data_width << ", "
         << data_height << std::endl;
      (*err) += ss.str();
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if ((data_width > threshold) || (data_height > threshold)) {
      if (err) {
        std::stringstream ss;
        ss << "data_width or data_height too large. 
data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile. exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = 
tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); if ((total_data_len == 0) || (total_data_len >= 0x4000000000)) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example `data_len // < 4` invalid_data = true; } else { // line_no may be negative. int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window[1]; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. 
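  // After decoding, each channel buffer already holds samples in the
  // requested type (e.g. HALF data promoted to FLOAT), so `pixel_types` is
  // updated to describe what `exr_image->images` actually stores.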
{ for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data width or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if (data_width > threshold) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > threshold) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
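    // (When `chunkCount` is absent, the block count is derived from the
    // image size instead; e.g. with ZIP compression at 16 scanlines per
    // block, a 100-scanline image needs ceil(100 / 16) = 7 offset-table
    // entries.)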
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } } return ret; } } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. 
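  // (LoadEXR() is a convenience API that always returns a 32-bit float RGBA
  // buffer, so HALF channels are requested as FLOAT here and converted
  // during decode.)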
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
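          // (Edge tiles are allocated at the full tile size, so ii/jj can
          // run past the image bounds and must be skipped.)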
          if (ii >= exr_image.width) {
            continue;
          }
          if (jj >= exr_image.height) {
            continue;
          }

          const int srcIdx = i + j * exr_header.tile_size_x;
          unsigned char **src = exr_image.tiles[it].images;
          (*out_rgba)[4 * idx + 0] =
              reinterpret_cast<float **>(src)[idxR][srcIdx];
          (*out_rgba)[4 * idx + 1] =
              reinterpret_cast<float **>(src)[idxG][srcIdx];
          (*out_rgba)[4 * idx + 2] =
              reinterpret_cast<float **>(src)[idxB][srcIdx];
          if (idxA != -1) {
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[idxA][srcIdx];
          } else {
            (*out_rgba)[4 * idx + 3] = 1.0;
          }
        }
      }
    }
  } else {
    for (int i = 0; i < exr_image.width * exr_image.height; i++) {
      (*out_rgba)[4 * i + 0] =
          reinterpret_cast<float **>(exr_image.images)[idxR][i];
      (*out_rgba)[4 * i + 1] =
          reinterpret_cast<float **>(exr_image.images)[idxG][i];
      (*out_rgba)[4 * i + 2] =
          reinterpret_cast<float **>(exr_image.images)[idxB][i];
      if (idxA != -1) {
        (*out_rgba)[4 * i + 3] =
            reinterpret_cast<float **>(exr_image.images)[idxA][i];
      } else {
        (*out_rgba)[4 * i + 3] = 1.0;
      }
    }
  }
}

(*width) = exr_image.width;
(*height) = exr_image.height;

FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);

return TINYEXR_SUCCESS;
}

int IsEXR(const char *filename) {
  EXRVersion exr_version;

  int ret = ParseEXRVersionFromFile(&exr_version, filename);
  if (ret != TINYEXR_SUCCESS) {
    return TINYEXR_ERROR_INVALID_HEADER;
  }

  return TINYEXR_SUCCESS;
}

int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret =
      ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}

int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    tinyexr::SetErrorMessage("Failed to parse EXR version", err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Read HALF channel as FLOAT.
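  // (Same promotion as in LoadEXR() above. A minimal usage sketch for this
  // API, with error-string handling elided and `data`/`data_size` assumed to
  // hold a complete EXR file in memory:
  //
  //   float *rgba = NULL;
  //   int w = 0, h = 0;
  //   const char *err = NULL;
  //   if (LoadEXRFromMemory(&rgba, &w, &h, data, data_size, &err) ==
  //       TINYEXR_SUCCESS) {
  //     // pixel (x, y), channel k -> rgba[4 * (y * w + x) + k]
  //     free(rgba);
  //   }
  // )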
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
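  // The file layout is: magic number (4 bytes) + version (4 bytes) + header
  // (header_len bytes) + chunk offset table + chunk data, so the offset
  // table begins at `header_len + 8`.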
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
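  // The required attributes are emitted below in order: channels,
  // compression, dataWindow, displayWindow, lineOrder, pixelAspectRatio,
  // screenWindowCenter and screenWindowWidth, followed by any custom
  // attributes and a terminating null byte.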
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += 
sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * 
exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
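      // (RLE output can be larger than the input for incompressible data,
      // since literal runs carry a length byte; 1.5x is used here as a
      // conservative bound.)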
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } size_t totalSize = static_cast<size_t>(offset); { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } if (memory.size() == 0) { tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char *>(malloc(totalSize)); memcpy((*memory_out), &memory.at(0), memory.size()); unsigned char *memory_ptr = *memory_out + memory.size(); for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { memcpy(memory_ptr, 
&data_list[i].at(0), data_list[i].size()); memory_ptr += data_list[i].size(); } return totalSize; // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 8, 0, 0] (version 2 with the deep-data bit set) if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { // per-scanline sample rows are allocated later, inside the block loop deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return TINYEXR_ERROR_INVALID_DATA; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data.
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return TINYEXR_ERROR_INVALID_DATA; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. File may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header.
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May be OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In a multipart image, there is a 'part number' before the chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check that 'part number' is identical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
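A minimal usage sketch for the one-shot SaveEXR() entry point defined above; assumes the tinyexr implementation has been compiled into the same build. The 64x64 size, the ramp pixel values, and the output name "ramp.exr" are arbitrary. It writes an interleaved RGB float buffer and releases the error string with FreeEXRErrorMessage() on failure, matching the API in this file.

#include <stdio.h>
#include <stdlib.h>

static int save_ramp_example(void) {
  const int w = 64, h = 64, comps = 3;
  float *rgb = (float *)malloc(sizeof(float) * (size_t)w * (size_t)h * (size_t)comps);
  if (!rgb) return TINYEXR_ERROR_CANT_WRITE_FILE;
  for (int i = 0; i < w * h; i++) {
    rgb[comps * i + 0] = (float)(i % w) / (float)w; /* R: horizontal ramp */
    rgb[comps * i + 1] = 0.5f;                      /* G: constant */
    rgb[comps * i + 2] = 0.25f;                     /* B: constant */
  }
  const char *err = NULL;
  int ret = SaveEXR(rgb, w, h, comps, 0 /* save_as_fp16 */, "ramp.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    fprintf(stderr, "SaveEXR failed: %s\n", err ? err : "(unknown)");
    FreeEXRErrorMessage(err); /* error strings are heap-allocated by tinyexr */
  }
  free(rgb);
  return ret;
}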
test-mempool.c
/* xZTL: Zone Translation Layer User-space Library * * Copyright 2019 Samsung Electronics * * Written by Ivan L. Picoli <i.picoli@samsung.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <pthread.h> #include <omp.h> #include <xztl.h> #include <xztl-mempool.h> #include <ztl-media.h> #include "CUnit/Basic.h" static const char **devname; static void cunit_mempool_assert_ptr(char *fn, void *ptr) { CU_ASSERT((uint64_t) ptr != 0); if (!ptr) printf("\n %s: ptr %p\n", fn, ptr); } static void cunit_mempool_assert_int(char *fn, int status) { CU_ASSERT(status == 0); if (status) printf(" %s: %x\n", fn, status); } static int cunit_mempool_init(void) { return 0; } static int cunit_mempool_exit(void) { return 0; } static void test_mempool_init(void) { int ret; ret = znd_media_register(*devname); cunit_mempool_assert_int("znd_media_register", ret); if (ret) return; ret = xztl_media_init(); cunit_mempool_assert_int("xztl_media_init", ret); if (ret) return; cunit_mempool_assert_int("xztl_mempool_init", xztl_mempool_init()); } static void test_mempool_create(void) { uint16_t type, tid, ents; uint32_t ent_sz; type = XZTL_MEMPOOL_MCMD; tid = 0; ents = 32; ent_sz = 1024; cunit_mempool_assert_int("xztl_mempool_create", xztl_mempool_create(type, tid, ents, ent_sz, NULL, NULL)); } static void test_mempool_destroy(void) { uint16_t type, tid; type = XZTL_MEMPOOL_MCMD; tid = 0; cunit_mempool_assert_int("xztl_mempool_destroy", xztl_mempool_destroy(type, tid)); } static void test_mempool_create_mult(void) { uint16_t type, tid, ents; uint32_t ent_sz; type = XZTL_MEMPOOL_MCMD; ents = 32; ent_sz = 128; #pragma omp parallel for for (tid = 0; tid < 8; tid++) { cunit_mempool_assert_int("xztl_mempool_create", xztl_mempool_create(type, tid, ents, ent_sz, NULL, NULL)); } } static void test_mempool_get_put(void) { uint16_t ent_i, ents = 30; uint32_t ent_sz = 128; struct xztl_mp_entry *ent[ents]; /* Get entries */ for (ent_i = 0; ent_i < ents; ent_i++) { ent[ent_i] = xztl_mempool_get(XZTL_MEMPOOL_MCMD, 0); cunit_mempool_assert_ptr("xztl_mempool_get", ent[ent_i]); } /* Modify entry bytes */ for (ent_i = 0; ent_i < ents; ent_i++) memset(ent[ent_i]->opaque, 0x0, ent_sz); /* Put entries */ for (ent_i = 0; ent_i < ents; ent_i++) { xztl_mempool_put(ent[ent_i], XZTL_MEMPOOL_MCMD, 0); CU_PASS("xztl_mempool_put"); } /* Repeat the process */ for (ent_i = 0; ent_i < ents; ent_i++) { ent[ent_i] = xztl_mempool_get(XZTL_MEMPOOL_MCMD, 0); cunit_mempool_assert_ptr("xztl_mempool_get", ent[ent_i]); } for (ent_i = 0; ent_i < ents; ent_i++) memset(ent[ent_i]->opaque, 0x0, ent_sz); for (ent_i = 0; ent_i < ents; ent_i++) { xztl_mempool_put(ent[ent_i], XZTL_MEMPOOL_MCMD, 0); CU_PASS("xztl_mempool_put"); } } static void test_mempool_exit(void) { cunit_mempool_assert_int("xztl_mempool_exit", xztl_mempool_exit()); cunit_mempool_assert_int("xztl_media_exit", xztl_media_exit()); } int main(int argc, const char **argv) { int failed; if (argc < 2) { printf("Please provide the device path. e.g. 
liou:/dev/nvme0n2\n"); return -1; } devname = &argv[1]; printf("Device: %s\n", *devname); CU_pSuite pSuite = NULL; if (CUE_SUCCESS != CU_initialize_registry()) return CU_get_error(); pSuite = CU_add_suite("Suite_mempool", cunit_mempool_init, cunit_mempool_exit); if (pSuite == NULL) { CU_cleanup_registry(); return CU_get_error(); } if ((CU_add_test(pSuite, "Initialize", test_mempool_init) == NULL) || (CU_add_test(pSuite, "Create a mempool", test_mempool_create) == NULL) || (CU_add_test(pSuite, "Destroy a mempool", test_mempool_destroy) == NULL) || (CU_add_test(pSuite, "Create parallel mempools", test_mempool_create_mult) == NULL) || (CU_add_test(pSuite, "Get and put entries", test_mempool_get_put) == NULL) || (CU_add_test(pSuite, "Closes the module", test_mempool_exit) == NULL)) { CU_cleanup_registry(); return CU_get_error(); } CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); failed = CU_get_number_of_tests_failed(); CU_cleanup_registry(); return failed; }
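A condensed sketch of the pool lifecycle the tests above exercise: create a pool, take one entry, write its payload, return it, destroy the pool. The entry count and size are arbitrary, the includes come from the test file above, and the media layer is assumed to be initialized first as in test_mempool_init().

static int mempool_roundtrip(void) {
    const uint16_t tid = 0, ents = 16;
    const uint32_t ent_sz = 256;

    if (xztl_mempool_create(XZTL_MEMPOOL_MCMD, tid, ents, ent_sz, NULL, NULL))
        return -1;

    struct xztl_mp_entry *ent = xztl_mempool_get(XZTL_MEMPOOL_MCMD, tid);
    if (!ent) {
        xztl_mempool_destroy(XZTL_MEMPOOL_MCMD, tid);
        return -1;
    }

    /* The entry payload is exclusively ours between get and put */
    memset(ent->opaque, 0xff, ent_sz);
    xztl_mempool_put(ent, XZTL_MEMPOOL_MCMD, tid);

    return xztl_mempool_destroy(XZTL_MEMPOOL_MCMD, tid);
}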
logramp.c
#include<Python.h> #include<numpy/arrayobject.h> #include<math.h> #include<omp.h> #define IND(a,i) *((double *)(a->data+i*a->strides[0])) static PyObject *logramp(PyObject *self, PyObject *args, PyObject *keywds); static PyObject *logramp(PyObject *self, PyObject *args, PyObject *keywds) { PyObject *etc; PyArrayObject *x,*y, *rampparams; double x0,a,b,c,d,e; int i; npy_intp dims[1]; static char *kwlist[] = {"rampparams","x","etc",NULL}; //etc = PyList_New(0); if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc)) { return NULL; } x0 = IND(rampparams,0); a = IND(rampparams,1); b = IND(rampparams,2); c = IND(rampparams,3); d = IND(rampparams,4); e = IND(rampparams,5); dims[0] = x->dimensions[0]; y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE); #pragma omp parallel for for(i=0;i<dims[0];i++) { if(IND(x,i)<=x0) IND(y,i) = e; else IND(y,i) = a*pow(log(IND(x,i)-x0),4)+b*pow(log(IND(x,i)-x0),3) \ +c*pow(log(IND(x,i)-x0),2)+d*log(IND(x,i)-x0)+e; } return PyArray_Return(y); } static char module_docstring[]="\ NAME:\n\ LOGRAMP\n\ \n\ PURPOSE:\n\ This function creates a ramp model that is a fourth-order polynomial in the natural log of time\n\ \n\ CATEGORY:\n\ Astronomy.\n\ \n\ CALLING SEQUENCE:\n\ \n\ Result = LOGRAMP([x0,a,b,c,d,e],x)\n\ \n\ INPUTS:\n\ x0: time offset\n\ a: log^4 coefficient\n\ b: log^3 coefficient\n\ c: log^2 coefficient\n\ d: log coefficient\n\ e: constant offset, returned for x <= x0\n\ x: Array of time/phase points\n\ \n\ OUTPUTS:\n\ This function returns an array of y values for the log-ramp model\n\ \n\ PROCEDURE:\n\ \n\ EXAMPLE:\n\ \n\ \n\ \n\ MODIFICATION HISTORY:\n\ Written by: Kevin Stevenson, UCF\n\n\ kevin218@knights.ucf.edu\n\ \n\ 2008-06-26 Original creation \n\ \n\ 2018-11-22 Jonathan Fraine, SSI\n\ jfraine at spacescience.org\n\ Updated c extensions to python3, with support for python2.7\n\ "; static PyMethodDef module_methods[] = { {"logramp",(PyCFunction)logramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}}; PyMODINIT_FUNC #if PY_MAJOR_VERSION >= 3 PyInit_logramp(void) #else initlogramp(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject *module; static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "logramp", /* m_name */ module_docstring, /* m_doc */ -1, /* m_size */ module_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; #endif #if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); if (!module) return NULL; /* Load `numpy` functionality. */ import_array(); return module; #else PyObject *m = Py_InitModule3("logramp", module_methods, module_docstring); if (m == NULL) return; /* Load `numpy` functionality. */ import_array(); #endif }
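A scalar reference for the model evaluated in the OpenMP loop above, handy for spot-checking one point at a time: y = e when x <= x0, otherwise a quartic polynomial in log(x - x0). This helper is illustrative only and not part of the module.

#include <math.h>

static double logramp_point(double x, double x0, double a, double b,
                            double c, double d, double e) {
    if (x <= x0)
        return e; /* flat segment before the ramp turns on */
    double l = log(x - x0);
    return a * l * l * l * l + b * l * l * l + c * l * l + d * l + e;
}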
GB_binop__remainder_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__remainder_fp32 // A.*B function (eWiseMult): GB_AemultB__remainder_fp32 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__remainder_fp32 // C+=b function (dense accum): GB_Cdense_accumb__remainder_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__remainder_fp32 // C=scalar+B GB_bind1st__remainder_fp32 // C=scalar+B' GB_bind1st_tran__remainder_fp32 // C=A+scalar GB_bind2nd__remainder_fp32 // C=A'+scalar GB_bind2nd_tran__remainder_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = remainderf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = remainderf (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_REMAINDER || GxB_NO_FP32 || GxB_NO_REMAINDER_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__remainder_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__remainder_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__remainder_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__remainder_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__remainder_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__remainder_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = remainderf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__remainder_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = remainderf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = remainderf (x, aij) ; \ } GrB_Info GB_bind1st_tran__remainder_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = remainderf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__remainder_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
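For reference, the scalar operation every kernel in this file applies is IEEE remainderf(). A plain-C sketch of the bind2nd case (z = remainderf(aij, y) with a fixed scalar y), stripped of the GraphBLAS plumbing and OpenMP scheduling; the function name is hypothetical:

#include <math.h>
#include <stdint.h>

static void bind2nd_remainder_ref (float *Cx, const float *Ax, float y,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // same operation as GB_BINOP above
        Cx [p] = remainderf (Ax [p], y) ;
    }
}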
Sqrt.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/Sqrt.c" #else void THNN_(Sqrt_updateOutput)( THNNState *state, THTensor *input, THTensor *output, accreal eps_) { THTensor_(resizeAs)(output, input); THTensor_(sqrt)(output, input); } void THNN_(Sqrt_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output) { THNN_CHECK_SHAPE(output, gradOutput); THTensor_(resizeAs)(gradInput, input); if (output->nDimension == 1 || !THTensor_(isContiguous)(output) || !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, *gradInput_data = (*output_data == 0.0) ? 0.0 : (0.5 * (*gradOutput_data / *output_data)); ); } else { real *gradOutput_data = THTensor_(data)(gradOutput); real *gradInput_data = THTensor_(data)(gradInput); real *output_data = THTensor_(data)(output); int64_t i; #pragma omp parallel for private(i) for(i = 0; i < THTensor_(nElement)(output); i++) { if (output_data[i] == 0.0) gradInput_data[i] = 0.0; else gradInput_data[i] = 0.5 * (gradOutput_data[i] / output_data[i]); } } } #endif
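The backward rule above in scalar form: for y = sqrt(x), dL/dx = dL/dy * 1/(2*y), with the y == 0 case clamped to 0 so the division cannot blow up. Illustrative only; the real kernel runs this elementwise, optionally under OpenMP.

static double sqrt_grad_point(double grad_output, double output) {
  /* output holds sqrt(input), so the derivative is 1 / (2 * output) */
  return (output == 0.0) ? 0.0 : 0.5 * (grad_output / output);
}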
md5-avx2.c
#define _BSD_SOURCE #include <stdint.h> #include <iso646.h> #include <stdbool.h> #include <string.h> #include "proofofwork-private.h" static inline __m256i md5_f(__m256i x, __m256i y, __m256i z) { return (x & y) | (~ x & z); } static inline __m256i md5_g(__m256i x, __m256i y, __m256i z) { return (x & z) | (y & ~ z); } static inline __m256i md5_h(__m256i x, __m256i y, __m256i z) { return x ^ y ^ z; } static inline __m256i md5_i(__m256i x, __m256i y, __m256i z) { return y ^ (x | ~ z); } static inline void md5_roundv(__m256i *a, __m256i b, __m256i c, __m256i d, __m256i x, unsigned s, uint32_t t, __m256i (*f)(__m256i, __m256i, __m256i)) { __m256i acc = _mm256_add_epi32(_mm256_add_epi32(*a, f(b, c, d)), _mm256_add_epi32(x, _mm256_set1_epi32(t))); *a = _mm256_add_epi32(b, mm256_rol_epi32(acc, s)); } static inline void md5_round(__m256i *a, __m256i b, __m256i c, __m256i d, uint32_t x, unsigned s, uint32_t t, __m256i (*f)(__m256i, __m256i, __m256i)) { __m256i acc = _mm256_add_epi32(_mm256_add_epi32(*a, f(b, c, d)), _mm256_set1_epi32(x + t)); *a = _mm256_add_epi32(b, mm256_rol_epi32(acc, s)); } static inline __m256i md5_cmp(__m256i x, uint32_t x0, uint32_t mask, uint32_t target) { return _mm256_cmpeq_epi32(_mm256_add_epi32(x, _mm256_set1_epi32(x0)) & _mm256_set1_epi32(mask), _mm256_set1_epi32(target)); } static const uint32_t md5_a0 = 0x67452301; static const uint32_t md5_b0 = 0xefcdab89; static const uint32_t md5_c0 = 0x98badcfe; static const uint32_t md5_d0 = 0x10325476; uint64_t pow_md5_count = 0; // for benchmark bool pow_md5_mine(uint8_t const *mask, uint8_t const *target, uint8_t *buffer, uint64_t size, int32_t const *indices) { // check arguments static_assert (__BYTE_ORDER == __LITTLE_ENDIAN, ""); if (mask == NULL) return false; if (target == NULL) return false; if (buffer == NULL) return false; if (indices == NULL) return false; for (int i = 0; i < pow_indices_length; ++ i) { if (indices[i] < -1 or (int64_t)size <= indices[i]) return false; } if (indices[0] == -1) return false; if (size > pow_md5_block_length - sizeof(uint64_t) / CHAR_BIT - 1) return false; // load hash const uint32_t mask_a = ((uint32_t *)mask)[0]; const uint32_t mask_b = ((uint32_t *)mask)[1]; const uint32_t mask_c = ((uint32_t *)mask)[2]; const uint32_t mask_d = ((uint32_t *)mask)[3]; const uint32_t target_a = ((uint32_t *)target)[0] & mask_a; const uint32_t target_b = ((uint32_t *)target)[1] & mask_b; const uint32_t target_c = ((uint32_t *)target)[2] & mask_c; const uint32_t target_d = ((uint32_t *)target)[3] & mask_d; // load text uint8_t local[pow_md5_block_length]; memcpy(local, buffer, pow_md5_block_length); local[size] = '\x80'; for (int i = size+1; i < pow_md5_block_length - sizeof(uint64_t) / CHAR_BIT; ++ i) local[i] = '\0'; ((uint32_t *)local)[14] = size * CHAR_BIT; const uint32_t x14 = size * CHAR_BIT; static const uint32_t x15 = 0; // load indices and alphabet to modify the text const int index0 = indices[0]; const int index1 = indices[1]; const int index2 = indices[2]; const int index3 = indices[3]; const int index4 = indices[4]; const int index5 = indices[5]; const int index6 = indices[6]; const int index7 = indices[7]; static_assert (pow_indices_length == 8, ""); repeat (i,pow_indices_length) { if (indices[i] != -1) { local[indices[i]] = 0; } } uint32_t *padded_alphabet = malloc(alphabet_size * sizeof(uint32_t)); repeat (i,alphabet_size) { uint32_t c = alphabet[i]; if (i - vector_width >= 0) c ^= alphabet[i - vector_width]; padded_alphabet[i] = c << (index0 % 4 * CHAR_BIT); } // search bool found = false; 
uint64_t cnt = 0; #pragma omp parallel for shared(found) firstprivate(local) reduction(+:cnt) repeat (i1, alphabet_size) { if (index1 != -1) local[index1] = alphabet[i1]; if (found or (index1 == -1 and i1 != 0)) continue; repeat (i7, alphabet_size) { if (index7 != -1) local[index7] = alphabet[i7]; repeat (i6, alphabet_size) { if (index6 != -1) local[index6] = alphabet[i6]; repeat (i5, alphabet_size) { if (index5 != -1) local[index5] = alphabet[i5]; repeat (i4, alphabet_size) { if (index4 != -1) local[index4] = alphabet[i4]; repeat (i3, alphabet_size) { if (index3 != -1) local[index3] = alphabet[i3]; cnt += alphabet_size * (alphabet_size / vector_width * vector_width); repeat (i2, alphabet_size) { if (index2 != -1) local[index2] = alphabet[i2]; __m256i y0 = _mm256_set1_epi32(((uint32_t *)local)[0 ]); __m256i y1 = _mm256_set1_epi32(((uint32_t *)local)[1 ]); __m256i y2 = _mm256_set1_epi32(((uint32_t *)local)[2 ]); __m256i y3 = _mm256_set1_epi32(((uint32_t *)local)[3 ]); __m256i y4 = _mm256_set1_epi32(((uint32_t *)local)[4 ]); __m256i y5 = _mm256_set1_epi32(((uint32_t *)local)[5 ]); __m256i y6 = _mm256_set1_epi32(((uint32_t *)local)[6 ]); __m256i y7 = _mm256_set1_epi32(((uint32_t *)local)[7 ]); __m256i y8 = _mm256_set1_epi32(((uint32_t *)local)[8 ]); __m256i y9 = _mm256_set1_epi32(((uint32_t *)local)[9 ]); __m256i y10 = _mm256_set1_epi32(((uint32_t *)local)[10]); __m256i y11 = _mm256_set1_epi32(((uint32_t *)local)[11]); __m256i y12 = _mm256_set1_epi32(((uint32_t *)local)[12]); __m256i y13 = _mm256_set1_epi32(((uint32_t *)local)[13]); for (int i0 = 0; i0 + vector_width - 1 < alphabet_size; i0 += vector_width) { // set last byte switch (index0 / 4) { case 0 : y0 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 1 : y1 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 2 : y2 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 3 : y3 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 4 : y4 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 5 : y5 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 6 : y6 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 7 : y7 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 8 : y8 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 9 : y9 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 10: y10 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 11: y11 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 12: y12 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; case 13: y13 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break; } // initialize vector __m256i a = _mm256_set1_epi32(md5_a0); __m256i b = _mm256_set1_epi32(md5_b0); __m256i c = _mm256_set1_epi32(md5_c0); __m256i d = _mm256_set1_epi32(md5_d0); // round [0, 16) md5_roundv(&a,b,c,d, y0 , 7, 0xd76aa478, md5_f); md5_roundv(&d,a,b,c, y1 , 12, 0xe8c7b756, md5_f); md5_roundv(&c,d,a,b, y2 , 17, 0x242070db, md5_f); md5_roundv(&b,c,d,a, y3 , 22, 0xc1bdceee, md5_f); md5_roundv(&a,b,c,d, y4 , 7, 0xf57c0faf, md5_f); md5_roundv(&d,a,b,c, y5 , 12, 0x4787c62a, md5_f); md5_roundv(&c,d,a,b, y6 , 17, 0xa8304613, md5_f); md5_roundv(&b,c,d,a, y7 , 22, 0xfd469501, md5_f); md5_roundv(&a,b,c,d, y8 , 7, 0x698098d8, md5_f); md5_roundv(&d,a,b,c, y9 , 12, 0x8b44f7af, md5_f); md5_roundv(&c,d,a,b, y10, 17, 0xffff5bb1, md5_f); md5_roundv(&b,c,d,a, y11, 22, 0x895cd7be, md5_f); 
    md5_roundv(&a,b,c,d, y12,  7, 0x6b901122, md5_f);
    md5_roundv(&d,a,b,c, y13, 12, 0xfd987193, md5_f);
    md5_round (&c,d,a,b, x14, 17, 0xa679438e, md5_f);
    md5_round (&b,c,d,a, x15, 22, 0x49b40821, md5_f);

    // round [16, 32)
    md5_roundv(&a,b,c,d, y1 ,  5, 0xf61e2562, md5_g);
    md5_roundv(&d,a,b,c, y6 ,  9, 0xc040b340, md5_g);
    md5_roundv(&c,d,a,b, y11, 14, 0x265e5a51, md5_g);
    md5_roundv(&b,c,d,a, y0 , 20, 0xe9b6c7aa, md5_g);
    md5_roundv(&a,b,c,d, y5 ,  5, 0xd62f105d, md5_g);
    md5_roundv(&d,a,b,c, y10,  9, 0x02441453, md5_g);
    md5_round (&c,d,a,b, x15, 14, 0xd8a1e681, md5_g);
    md5_roundv(&b,c,d,a, y4 , 20, 0xe7d3fbc8, md5_g);
    md5_roundv(&a,b,c,d, y9 ,  5, 0x21e1cde6, md5_g);
    md5_round (&d,a,b,c, x14,  9, 0xc33707d6, md5_g);
    md5_roundv(&c,d,a,b, y3 , 14, 0xf4d50d87, md5_g);
    md5_roundv(&b,c,d,a, y8 , 20, 0x455a14ed, md5_g);
    md5_roundv(&a,b,c,d, y13,  5, 0xa9e3e905, md5_g);
    md5_roundv(&d,a,b,c, y2 ,  9, 0xfcefa3f8, md5_g);
    md5_roundv(&c,d,a,b, y7 , 14, 0x676f02d9, md5_g);
    md5_roundv(&b,c,d,a, y12, 20, 0x8d2a4c8a, md5_g);

    // round [32, 48)
    md5_roundv(&a,b,c,d, y5 ,  4, 0xfffa3942, md5_h);
    md5_roundv(&d,a,b,c, y8 , 11, 0x8771f681, md5_h);
    md5_roundv(&c,d,a,b, y11, 16, 0x6d9d6122, md5_h);
    md5_round (&b,c,d,a, x14, 23, 0xfde5380c, md5_h);
    md5_roundv(&a,b,c,d, y1 ,  4, 0xa4beea44, md5_h);
    md5_roundv(&d,a,b,c, y4 , 11, 0x4bdecfa9, md5_h);
    md5_roundv(&c,d,a,b, y7 , 16, 0xf6bb4b60, md5_h);
    md5_roundv(&b,c,d,a, y10, 23, 0xbebfbc70, md5_h);
    md5_roundv(&a,b,c,d, y13,  4, 0x289b7ec6, md5_h);
    md5_roundv(&d,a,b,c, y0 , 11, 0xeaa127fa, md5_h);
    md5_roundv(&c,d,a,b, y3 , 16, 0xd4ef3085, md5_h);
    md5_roundv(&b,c,d,a, y6 , 23, 0x04881d05, md5_h);
    md5_roundv(&a,b,c,d, y9 ,  4, 0xd9d4d039, md5_h);
    md5_roundv(&d,a,b,c, y12, 11, 0xe6db99e5, md5_h);
    md5_round (&c,d,a,b, x15, 16, 0x1fa27cf8, md5_h);
    md5_roundv(&b,c,d,a, y2 , 23, 0xc4ac5665, md5_h);

    // round [48, 61)
    md5_roundv(&a,b,c,d, y0 ,  6, 0xf4292244, md5_i);
    md5_roundv(&d,a,b,c, y7 , 10, 0x432aff97, md5_i);
    md5_round (&c,d,a,b, x14, 15, 0xab9423a7, md5_i);
    md5_roundv(&b,c,d,a, y5 , 21, 0xfc93a039, md5_i);
    md5_roundv(&a,b,c,d, y12,  6, 0x655b59c3, md5_i);
    md5_roundv(&d,a,b,c, y3 , 10, 0x8f0ccc92, md5_i);
    md5_roundv(&c,d,a,b, y10, 15, 0xffeff47d, md5_i);
    md5_roundv(&b,c,d,a, y1 , 21, 0x85845dd1, md5_i);
    md5_roundv(&a,b,c,d, y8 ,  6, 0x6fa87e4f, md5_i);
    md5_round (&d,a,b,c, x15, 10, 0xfe2ce6e0, md5_i);
    md5_roundv(&c,d,a,b, y6 , 15, 0xa3014314, md5_i);
    md5_roundv(&b,c,d,a, y13, 21, 0x4e0811a1, md5_i);
    md5_roundv(&a,b,c,d, y4 ,  6, 0xf7537e82, md5_i);

    // compare result
    const __m256i cmp_a = md5_cmp(a, md5_a0, mask_a, target_a); // specialize on the first 4 bytes
    if (unlikely(not _mm256_testz_si256(cmp_a, cmp_a))) {
      md5_roundv(&d,a,b,c, y11, 10, 0xbd3af235, md5_i);
      md5_roundv(&c,d,a,b, y2 , 15, 0x2ad7d2bb, md5_i);
      md5_roundv(&b,c,d,a, y9 , 21, 0xeb86d391, md5_i);
      const __m256i cmp_d = md5_cmp(d, md5_d0, mask_d, target_d);
      const __m256i cmp_c = md5_cmp(c, md5_c0, mask_c, target_c);
      const __m256i cmp_b = md5_cmp(b, md5_b0, mask_b, target_b);
      const __m256i cmp_ad = cmp_a & cmp_d;
      const __m256i cmp_bc = cmp_b & cmp_c;
      if (unlikely(not _mm256_testz_si256(cmp_ad, cmp_bc))) {
        uint32_t cmp[vector_width];
        _mm256_storeu_si256((__m256i *)cmp, cmp_ad & cmp_bc);
        repeat (i, vector_width) if (not found and cmp[i]) {
          #pragma omp critical
          {
            if (not found) {
              found = true;
              memcpy(buffer, local, pow_md5_block_length);
              buffer[index0] = alphabet[i0 + i];
              buffer[size] = 0;
            }
          }
        }
      }
    }
    // break
    }
    if (index2 == -1 or found) break;
    }
    if (index3 == -1 or found) break;
    }
    if (index4 == -1 or found) break;
    }
    if (index5 == -1 or found) break;
    }
    if (index6 == -1 or found) break;
    }
    if (index7 == -1 or found) break;
    }
  }

  // leave
  free(padded_alphabet);
  pow_md5_count = cnt;
  return found;
}
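// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original file: the early exit above tests
// only the `a` state word after round 60, because the three deferred rounds
// update only d, c, and b; the remaining words are checked only on a hit.
// Below is a minimal model of what a masked per-lane compare such as md5_cmp
// could look like.  The name md5_cmp_sketch and the parameter meanings are
// assumptions, not this codebase's actual definition.
// ---------------------------------------------------------------------------
#if 0
#include <immintrin.h>
#include <stdint.h>

static inline __m256i md5_cmp_sketch(__m256i v, uint32_t init,
                                     __m256i mask, __m256i target)
{
    // Add the chaining value (e.g. md5_a0) to get the final state word,
    // keep only the bits selected by `mask`, then compare per 32-bit lane.
    __m256i h = _mm256_add_epi32(v, _mm256_set1_epi32((int) init));
    return _mm256_cmpeq_epi32(_mm256_and_si256(h, mask), target);
}
#endif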
quicksort.h
// -*- C++ -*-

// Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file parallel/quicksort.h
 *  @brief Implementation of an unbalanced parallel quicksort (in-place).
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Johannes Singler.

#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1

#include <parallel/parallel.h>
#include <parallel/partition.h>

namespace __gnu_parallel
{
  /** @brief Unbalanced quicksort divide step.
   *  @param __begin Begin iterator of subsequence.
   *  @param __end End iterator of subsequence.
   *  @param __comp Comparator.
   *  @param __pivot_rank Desired rank of the pivot.
   *  @param __num_samples Choose pivot from that many samples.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    typename std::iterator_traits<_RAIter>::difference_type
    __parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
                              _Compare __comp, typename std::iterator_traits
                              <_RAIter>::difference_type __pivot_rank,
                              typename std::iterator_traits
                              <_RAIter>::difference_type __num_samples,
                              _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;
      __num_samples = std::min(__num_samples, __n);

      // Allocate uninitialized, to avoid default constructor.
      _ValueType* __samples = static_cast<_ValueType*>
        (::operator new(__num_samples * sizeof(_ValueType)));

      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        {
          const unsigned long long __index = static_cast<unsigned long long>
            (__s) * __n / __num_samples;
          ::new(&(__samples[__s])) _ValueType(__begin[__index]);
        }

      __gnu_sequential::sort(__samples, __samples + __num_samples, __comp);

      _ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];

      __gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
        __pred(__comp, __pivot);
      _DifferenceType __split = __parallel_partition(__begin, __end,
                                                     __pred, __num_threads);

      ::operator delete(__samples);

      return __split;
    }

  /** @brief Unbalanced quicksort conquer step.
   *  @param __begin Begin iterator of subsequence.
   *  @param __end End iterator of subsequence.
   *  @param __comp Comparator.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    void
    __parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
                               _Compare __comp,
                               _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      if (__num_threads <= 1)
        {
          __gnu_sequential::sort(__begin, __end, __comp);
          return;
        }

      _DifferenceType __n = __end - __begin, __pivot_rank;

      if (__n <= 1)
        return;

      _ThreadIndex __num_threads_left;

      if ((__num_threads % 2) == 1)
        __num_threads_left = __num_threads / 2 + 1;
      else
        __num_threads_left = __num_threads / 2;

      __pivot_rank = __n * __num_threads_left / __num_threads;

      _DifferenceType __split = __parallel_sort_qs_divide
        (__begin, __end, __comp, __pivot_rank,
         _Settings::get().sort_qs_num_samples_preset, __num_threads);

#pragma omp parallel sections num_threads(2)
      {
#pragma omp section
        __parallel_sort_qs_conquer(__begin, __begin + __split,
                                   __comp, __num_threads_left);
#pragma omp section
        __parallel_sort_qs_conquer(__begin + __split, __end,
                                   __comp, __num_threads - __num_threads_left);
      }
    }

  /** @brief Unbalanced quicksort main call.
   *  @param __begin Begin iterator of input sequence.
   *  @param __end End iterator of input sequence, ignored.
   *  @param __comp Comparator.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    void
    __parallel_sort_qs(_RAIter __begin, _RAIter __end,
                       _Compare __comp,
                       _ThreadIndex __num_threads)
    {
      _GLIBCXX_CALL(__n)

      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;

      // At least one element per processor.
      if (__num_threads > __n)
        __num_threads = static_cast<_ThreadIndex>(__n);

      __parallel_sort_qs_conquer(
        __begin, __begin + __n, __comp, __num_threads);
    }

} //namespace __gnu_parallel

#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
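// ---------------------------------------------------------------------------
// Editor's usage sketch, not part of the original header: user code normally
// reaches this algorithm through std::sort when building with
// -D_GLIBCXX_PARALLEL and -fopenmp, but the internal entry point can also be
// driven directly.  Illustrative only; the direct call is an assumption about
// internal API usage, not documented practice.
// ---------------------------------------------------------------------------
#if 0
#include <functional>
#include <vector>
#include <parallel/quicksort.h>

int main()
{
  std::vector<int> v = {5, 1, 4, 2, 3};
  // Sort with up to 4 threads using the unbalanced parallel quicksort.
  __gnu_parallel::__parallel_sort_qs(v.begin(), v.end(),
                                     std::less<int>(), 4);
  return 0;
}
#endif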
regionTest1.c
void bar(int a);   // forward declaration: foo and bar are mutually recursive

void foo(int a) {
  bar(a-1);
  #pragma omp parallel
  {
    int b;
    bar(a-1);
    b = 20;
  }
}

void bar(int a) {
  if (a > 10) {
    foo(a-1);
  }
}

int main () {
  int x;
  #pragma omp parallel
  {
    int x;
    x = 10 + 20;
    foo(x);
  }
  if (x < 10) {
    #pragma omp parallel
    {
      int y;
      y = 10;
      // foo(y);
    }
  } else {
    #pragma omp parallel
    {
      int z;
      z = 10;
      // foo(z);
    }
  }
}
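/* Editor's companion sketch, not part of the original test: the nesting of
 * the parallel regions exercised above can be observed at run time with the
 * standard OpenMP introspection calls (omp_get_level, omp_get_thread_num),
 * for example: */
#if 0
#include <omp.h>
#include <stdio.h>
int main(void) {
  #pragma omp parallel num_threads(2)
  printf("level=%d thread=%d nthreads=%d\n",
         omp_get_level(), omp_get_thread_num(), omp_get_num_threads());
  return 0;
}
#endif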
GB_binop__bget_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_int64) // A.*B function (eWiseMult): GB (_AemultB_08__bget_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bget_int64) // A.*B function (eWiseMult): GB (_AemultB_04__bget_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bget_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int64) // C=scalar+B GB (_bind1st__bget_int64) // C=scalar+B' GB (_bind1st_tran__bget_int64) // C=A+scalar GB (_bind2nd__bget_int64) // C=A'+scalar GB (_bind2nd_tran__bget_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = GB_BITGET (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, int64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT64 || GxB_NO_BGET_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bget_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bget_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bget_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, int64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bget_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, int64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bget_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
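// Editor's sketch, not part of the generated file: a plausible scalar model
// of the kernel above, assuming GB_BITGET(x,y,int64_t,64) yields the single
// bit of x selected by y.  The exact index convention (0- or 1-based, and
// out-of-range behavior) is fixed by the GB_BITGET macro in the library
// headers, which is not shown here, so this model is an assumption.
#if 0
#include <stdint.h>
static inline int64_t bget_int64_model(int64_t x, int64_t y)
{
    return (x >> (y & 63)) & 1 ;   // assumed: 0-based bit index, wrapped to 64 bits
}
#endif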
variables.c
#include <stdio.h>

int main(void) {
  int var1 = 1, var2 = 2;

  #pragma omp parallel private(var1, var2)
  {
    printf("Region 1: var1=%i, var2=%i\n", var1, var2);
    var1++;
    var2++;
  }
  printf("After region 1: var1=%i, var2=%i\n\n", var1, var2);

  #pragma omp parallel firstprivate(var1, var2)
  {
    printf("Region 2: var1=%i, var2=%i\n", var1, var2);
    var1++;
    var2++;
  }
  printf("After region 2: var1=%i, var2=%i\n\n", var1, var2);

  #pragma omp parallel /* same as omp parallel shared(var1, var2) */
  {
    printf("Region 3: var1=%i, var2=%i\n", var1, var2);
    /* Note that this introduces a data race condition! */
    var1++;
    var2++;
  }
  printf("After region 3: var1=%i, var2=%i\n\n", var1, var2);

  return 0;
}
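/* Editor's companion sketch, not part of the original example: one way to
 * keep the increments of region 3 while avoiding the race is a reduction
 * clause, which gives each thread a private copy of each variable and
 * combines the copies into the originals at the end of the region: */
#if 0
#pragma omp parallel reduction(+: var1, var2)
{
  var1++;   /* each thread increments its own copy */
  var2++;   /* copies are summed into var1/var2 at the join */
}
#endif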
residualbased_predictorcorrector_velocity_bdf_scheme_turbulent.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BDF_TURBULENT_SCHEME ) #define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BDF_TURBULENT_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /// BDF2 time scheme for the incompressible flow problem. /** This scheme implements update operations and the calculation of the BDF coefficients for variable time step sizes. * * WARNING: this scheme assumes that the element internally implements the BDF2 scheme and is hence NOT compatible with the * elements ASGS2D, ASGS3D, VMS, MonolithicWallConditon * * the compatible element so far is * @see TwoFluidVMS * * note also that in the prediction step only the velocity, and NOT the pressure is extrapolated in time. */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class ResidualBasedPredictorCorrectorBDFSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace> { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorBDFSchemeTurbulent); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef Element::GeometryType GeometryType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorBDFSchemeTurbulent( unsigned int DomainSize) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP) // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. {} /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorBDFSchemeTurbulent( unsigned int DomainSize, Kratos::Flags& rSlipFlag) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,rSlipFlag) // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. {} /** Constructor with a turbulence model */ ResidualBasedPredictorCorrectorBDFSchemeTurbulent( unsigned int DomainSize, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mpTurbulenceModel(pTurbulenceModel), mRotationTool(DomainSize,DomainSize+1,SLIP) // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs {} /** Destructor. 
*/ ~ResidualBasedPredictorCorrectorBDFSchemeTurbulent() override { } /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. */ //*************************************************************************** void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); mpDofUpdater->UpdateDofs(rDofSet,Dv); mRotationTool.RecoverVelocities(r_model_part); KRATOS_CATCH("") } //*************************************************************************** //predicts the solution at the current step as // v = vold void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; if(Dt != 0.0 && OldDt != 0) { //estimate acceleration from velocity in the past and predict the future. Note that pressure is NOT predicted const ModelPart::NodesContainerType::iterator it_begin = rModelPart.NodesBegin(); array_1d<double,3> dv; //in the next loop we do for each node //vn+1 = vn + dt*(vn - vn-1)/oldDt #pragma omp parallel for private(dv) for(int i=0; i< static_cast<int>(rModelPart.Nodes().size()); i++) { ModelPart::NodesContainerType::iterator it = it_begin + i; const array_1d<double,3>& aux = it->FastGetSolutionStepValue(VELOCITY,1); noalias(dv) = aux; noalias(dv) -= it->FastGetSolutionStepValue(VELOCITY,2); array_1d<double,3>& v = it->FastGetSolutionStepValue(VELOCITY); const double dt_ratio = Dt/OldDt; if(it->IsFixed(VELOCITY_X) == false) v[0] = aux[0] + dt_ratio*dv[0]; if(it->IsFixed(VELOCITY_Y) == false) v[1] = aux[1] + dt_ratio*dv[1]; if(it->IsFixed(VELOCITY_Z) == false) v[2] = aux[2] + dt_ratio*dv[2]; //noalias(v) = aux; //noalias( v ) += () * dv; } } else { if (rModelPart.GetCommunicator().MyPID() == 0) std::cout << "predict is doing nothing since OldDt = " << OldDt << "and Dt = " << Dt << std::endl; } // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "end of prediction" << std::endl; } //*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. 
this function calculates at the same time the contribution to the LHS and to the RHS of the system */ void CalculateSystemContributions(Element::Pointer rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY //Initializing the non linear iteration for the current element (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo); //basic operations for the element considered (rCurrentElement)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry()); KRATOS_CATCH("") } void Calculate_RHS_Contribution(Element::Pointer rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& CurrentProcessInfo) override { //Initializing the non linear iteration for the current element (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo); //basic operations for the element considered (rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo); (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(RHS_Contribution,rCurrentElement->GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement->GetGeometry()); } /** functions totally analogous to the precedent but applied to the "condition" objects */ void Condition_CalculateSystemContributions(Condition::Pointer rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED"); (rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo); (rCurrentCondition)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); (rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry()); KRATOS_CATCH("") } void Condition_Calculate_RHS_Contribution(Condition::Pointer rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED"); //Initializing the non linear iteration for the current condition (rCurrentCondition) -> InitializeNonLinearIteration(rCurrentProcessInfo); //basic operations for the element considered (rCurrentCondition)->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); (rCurrentCondition)->EquationIdVector(EquationId,rCurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(RHS_Contribution,rCurrentCondition->GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition->GetGeometry()); 
KRATOS_CATCH(""); } //************************************************************************************* //************************************************************************************* void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { ProcessInfo& rCurrentProcessInfo = r_model_part.GetProcessInfo(); if (r_model_part.GetBufferSize() != 3) KRATOS_THROW_ERROR(std::logic_error, "wrong buffer size. Expects 3, currently: ", r_model_part.GetBufferSize()); //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; if(OldDt == 0.0) KRATOS_THROW_ERROR(std::logic_error,"found an OldDt = 0.0 in InitializeSolutionStep",""); double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; if(BDFcoeffs.size() != 3) BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); } //************************************************************************************* //************************************************************************************* void InitializeNonLinIteration(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY if (mpTurbulenceModel != 0) // If not null mpTurbulenceModel->Execute(); KRATOS_CATCH("") } void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //if orthogonal subscales are computed if (CurrentProcessInfo[OSS_SWITCH] == 1.0) { KRATOS_INFO_IF("ResidualBasedPredictorCorrectorBDFSchemeTurbulent", rModelPart.GetCommunicator().MyPID() == 0) << "Computing OSS projections" << std::endl; for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++) { noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; }//end of loop over nodes //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA array_1d<double, 3 > output; for (typename ModelPart::ElementsContainerType::iterator elem = rModelPart.ElementsBegin(); elem != rModelPart.ElementsEnd(); elem++) { elem->Calculate(ADVPROJ, output, CurrentProcessInfo); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++) { if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0) { ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0; //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************"); } const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); ind->FastGetSolutionStepValue(ADVPROJ) /= Area; ind->FastGetSolutionStepValue(DIVPROJ) /= Area; } } } void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, 
TSystemVectorType &b) override { ComputeReactions(rModelPart, A, Dx, b); //Element::EquationIdVectorType EquationId; //LocalSystemVectorType RHS_Contribution; //LocalSystemMatrixType LHS_Contribution; //ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); // //ModelPart::NodeIterator itnodes_begin = rModelPart.NodesBegin(); //const int nnodes = static_cast<int>(rModelPart.Nodes().size()); //#pragma omp parallel for firstprivate(nnodes, itnodes_begin) //for(int i=0; i<nnodes; i++) //{ // ModelPart::NodeIterator itNode = itnodes_begin + i; // (itNode->FastGetSolutionStepValue(REACTION)).clear(); //} // // //ModelPart::ElementsContainerType::iterator itelem_begin = rModelPart.ElementsBegin(); //const int nelems = static_cast<int>(rModelPart.Elements().size()); // #pragma omp parallel for firstprivate(nelems, itelem_begin) //for(int i=0; i<nelems; i++) //{ // ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i; // // (itElem)->InitializeNonLinearIteration(CurrentProcessInfo); // (itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //TODO: call CalculateRHS instead // GeometryType& rGeom = (itElem)->GetGeometry(); // const int NumNodes = static_cast<int>(rGeom.PointsNumber()); // unsigned int Dimension = rGeom.WorkingSpaceDimension(); // unsigned int index = 0; // // for (int i = 0; i < NumNodes; i++) // { // // array_1d<double,3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION); // rGeom[i].SetLock(); // rReaction[0] -= RHS_Contribution[index++]; // rReaction[1] -= RHS_Contribution[index++]; // if (Dimension == 3) rReaction[2] -= RHS_Contribution[index++]; // rGeom[i].UnSetLock(); // index++; // skip pressure dof // // } //} // //rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // //#pragma omp parallel for firstprivate(nelems, itelem_begin) //for(int i=0; i<nelems; i++) //{ // ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i; // (itElem)->FinalizeSolutionStep(CurrentProcessInfo); //} } virtual void ComputeReactions(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) { Element::EquationIdVectorType EquationId; LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::NodeIterator itnodes_begin = rModelPart.NodesBegin(); const int nnodes = static_cast<int>(rModelPart.Nodes().size()); #pragma omp parallel for firstprivate(nnodes, itnodes_begin) for (int i = 0; i<nnodes; i++) { ModelPart::NodeIterator itNode = itnodes_begin + i; (itNode->FastGetSolutionStepValue(REACTION)).clear(); } ModelPart::ElementsContainerType::iterator itelem_begin = rModelPart.ElementsBegin(); const int nelems = static_cast<int>(rModelPart.Elements().size()); #pragma omp parallel for firstprivate(nelems, itelem_begin) for (int i = 0; i<nelems; i++) { ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i; (itElem)->InitializeNonLinearIteration(CurrentProcessInfo); (itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //TODO: call CalculateRHS instead GeometryType& rGeom = (itElem)->GetGeometry(); const int NumNodes = static_cast<int>(rGeom.PointsNumber()); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (int i = 0; i < NumNodes; i++) { array_1d<double, 3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION); rGeom[i].SetLock(); rReaction[0] -= RHS_Contribution[index++]; rReaction[1] -= 
RHS_Contribution[index++]; if (Dimension == 3) rReaction[2] -= RHS_Contribution[index++]; rGeom[i].UnSetLock(); index++; // skip pressure dof } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); #pragma omp parallel for firstprivate(nelems, itelem_begin) for (int i = 0; i<nelems; i++) { ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i; (itElem)->FinalizeSolutionStep(CurrentProcessInfo); } } //************************************************************************************************ //************************************************************************************************ /// Free memory allocated by this object. void Clear() override { this->mpDofUpdater->Clear(); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ Process::Pointer mpTurbulenceModel; CoordinateTransformationUtils<LocalSystemMatrixType, LocalSystemVectorType, double> mRotationTool; /*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class Scheme */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BDF_TURBULENT_SCHEME defined */
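// Editor's sketch, not part of the original header: a standalone numeric
// check of the variable-step BDF2 coefficient formulas computed in
// InitializeSolutionStep.  For a constant step (Dt == OldDt, so Rho == 1)
// they reduce to the values quoted in the comments there:
// 3/(2 Dt), -4/(2 Dt), 1/(2 Dt).
#if 0
#include <cassert>
#include <cmath>
int main()
{
    const double Dt = 0.1, OldDt = 0.1;
    const double Rho = OldDt / Dt;
    const double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
    const double c0 =  TimeCoeff * (Rho * Rho + 2.0 * Rho);        // step n+1
    const double c1 = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0);  // step n
    const double c2 =  TimeCoeff;                                  // step n-1
    assert(std::fabs(c0 - 1.5 / Dt) < 1e-12);
    assert(std::fabs(c1 + 2.0 / Dt) < 1e-12);
    assert(std::fabs(c2 - 0.5 / Dt) < 1e-12);
    return 0;
}
#endif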
csrmv_merge.h
#ifndef __CSRMV_MERGE_H__ #define __CSRMV_MERGE_H__ #include <algorithm> #include "complex_ops.h" #include "openmp.h" #include "numpy/ndarraytypes.h" // See work my Merrill et. al. (http://ieeexplore.ieee.org/abstract/document/7877136/) for original work and implementation. // This code contains modified versions of algorithms 2 and 3. template<class I> class CountingInputIterator{ const I init; public: CountingInputIterator(I _init) : init(_init) {} I operator[](I i){return init+i;} }; template<class I> struct CoordinateT{ I x,y; CoordinateT(I _x,I _y) : x(_x), y(_y) {} }; template<class I,class AIteratorT,class BIteratorT> CoordinateT<I> MergePathSearch(I diagonal, I a_len, I b_len, AIteratorT a, BIteratorT b) { // Diagonal search range (in x coordinate space) I zero = 0; I x_min = std::max(diagonal - b_len, zero); I x_max = std::min(diagonal, a_len); // 2D binary-search along the diagonal search range while (x_min < x_max) { I pivot = (x_min + x_max) >> 1; if (a[pivot] <= b[diagonal - pivot - 1]) { // Keep top-right half of diagonal range x_min = pivot + 1; } else { // Keep bottom-left half of diagonal range x_max = pivot; } } return CoordinateT<I>( std::min(x_min, a_len), // x coordinate in A diagonal - x_min); // y coordinate in B } template<class I,class T1,class T2,class T3> void csrmv_merge(const bool overwrite_y, const I num_rows, const I row_offsets[], const I column_indices[], const T1 values[], const T2 alpha, const T3 x[], I row_carry_out[], T3 value_carry_out[], T3 y[]) { const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets const I num_nonzeros = row_offsets[num_rows]; int num_threads = omp_get_num_threads(); CountingInputIterator<I> nz_indices(0); // Merge list B: Natural numbers(NZ indices) I num_merge_items = num_rows + num_nonzeros; // Merge path total length I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread if(overwrite_y){ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // if(overwrite_y){ // std::fill(y+thread_coord.x,y+thread_coord_end.x,T3(0)); // } // Consume merge items, whole rows first T3 running_total = T3(0); for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; y[thread_coord.x] = alpha * running_total; running_total = T3(0); } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } else{ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = 
std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // if(overwrite_y){ // std::fill(y+thread_coord.x,y+thread_coord_end.x,T3(0)); // } // Consume merge items, whole rows first T3 running_total = T3(0); for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; y[thread_coord.x] += alpha * running_total; running_total = T3(0); } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } // Carry-out fix-up (rows spanning multiple threads) #pragma omp single { for (int tid = 0; tid < num_threads - 1; ++tid) if (row_carry_out[tid] < num_rows) y[row_carry_out[tid]] += alpha * value_carry_out[tid]; } } template<class I,class T1,class T2,class T3> void csrmv_merge_strided(const bool overwrite_y, const I num_rows, const I row_offsets[], const I column_indices[], const T1 values[], const T2 alpha, const npy_intp stride_x, const T3 x[], I row_carry_out[], T3 value_carry_out[], const npy_intp stride_y, T3 y[]) { const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets const I num_nonzeros = row_offsets[num_rows]; int num_threads = omp_get_num_threads(); CountingInputIterator<I> nz_indices(0); // Merge list B: Natural numbers(NZ indices) I num_merge_items = num_rows + num_nonzeros; // Merge path total length I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread // if(overwrite_y){ // #pragma omp for schedule(static) // for(I i=0;i<num_rows;i++){ // y[i * stride_y] = 0; // } // } if(overwrite_y){ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // Consume merge items, whole rows first T3 running_total = 0.0; for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; y[thread_coord.x * stride_y] = alpha * running_total; // assign vs. 
add in-place running_total = 0.0; } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } else{ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // Consume merge items, whole rows first T3 running_total = 0.0; for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; y[thread_coord.x * stride_y] += alpha * running_total; // add in-place vs. assign running_total = 0.0; } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } // Carry-out fix-up (rows spanning multiple threads) #pragma omp single { for (int tid = 0; tid < num_threads - 1; ++tid) if (row_carry_out[tid] < num_rows) y[row_carry_out[tid] * stride_y] += alpha * value_carry_out[tid]; } } #endif
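// Editor's usage sketch, not part of the original header: csrmv_merge uses
// '#pragma omp for' and omp_get_num_threads(), so it is meant to be invoked
// from inside an existing parallel region, with the carry buffers sized to
// the thread team.  The wrapper name spmv_merge_demo is hypothetical.
#if 0
#include <omp.h>
#include <vector>

void spmv_merge_demo(int n, const int Ap[], const int Aj[],
                     const double Ax[], const double x[], double y[])
{
    const int nt = omp_get_max_threads();
    std::vector<int>    row_carry(nt);   // per-thread row carry-out
    std::vector<double> val_carry(nt);   // per-thread partial-row value
    #pragma omp parallel
    csrmv_merge(true, n, Ap, Aj, Ax, 1.0, x,
                row_carry.data(), val_carry.data(), y);  // y = 1.0 * A * x
}
#endif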
GB_unaryop__minv_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_bool_fp64
// op(A') function: GB_tran__minv_bool_fp64

// C type:   bool
// A type:   double
// cast:     ;
// unaryop:  cij = true

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = true ;

// casting
#define GB_CASTING(z, x) \
    ; ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_bool_fp64
(
    bool *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
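// Editor's sketch, not part of the generated file: per the GB_CASTING and
// GB_OP macros above, the cast is a no-op and the operator ignores its input,
// so the kernel writes the constant 'true' to every entry.  A scalar model:
#if 0
#include <stdbool.h>
#include <stdint.h>
static void unop_minv_bool_fp64_model(bool *Cx, const double *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        (void) Ax[p] ;   // value not used: GB_GETA expands to ';'
        Cx[p] = true ;   // GB_OP: z = true
    }
}
#endif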
triad.h
#ifndef TRIAD_H #define TRIAD_H namespace TSnap { ///////////////////////////////////////////////// // Triads and clustering coefficient /// Computes the average clustering coefficient as defined in Watts and Strogatz, Collective dynamics of 'small-world' networks. ##TSnap::GetClustCf template <class PGraph> double GetClustCf(const PGraph& Graph, int SampleNodes=-1); /// Computes the distribution of average clustering coefficient. ##TSnap::GetClustCf1 template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int SampleNodes=-1); /// Computes the distribution of average clustering coefficient as well as the number of open and closed triads in the graph. ##TSnap::GetClustCf2 template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int64& ClosedTriadsX, int64& OpenTriadsX, int SampleNodes=-1); /// Computes the distribution of average clustering coefficient as well as the number of open and closed triads in the graph. ##TSnap::GetClustCfAll template <class PGraph> double GetClustCfAll(const PGraph& Graph, TFltPrV& DegToCCfV, int64& ClosedTriadsX, int64& OpenTriadsX, int SampleNodes=-1); /// Returns clustering coefficient of a particular node. ##TSnap::GetNodeClustCf template <class PGraph> double GetNodeClustCf(const PGraph& Graph, const int& NId); /// Computes clustering coefficient of each node of the Graph. ##TSnap::GetClustCf1 template <class PGraph> void GetNodeClustCf(const PGraph& Graph, TIntFltH& NIdCCfH); /// Returns the number of triangles in a graph. ##TSnap::GetTriads template <class PGraph> int64 GetTriads(const PGraph& Graph, int SampleNodes=-1); /// Computes the number of Closed and Open triads. ##TSnap::GetTriads1 template <class PGraph> int64 GetTriads(const PGraph& Graph, int64& ClosedTriadsX, int64& OpenTriadsX, int SampleNodes); /// Computes the number of Closed and Open triads. ##TSnap::GetTriadsAll template <class PGraph> int64 GetTriadsAll(const PGraph& Graph, int64& ClosedTriadsX, int64& OpenTriadsX, int SampleNodes=-1); /// Computes the number of open and close triads for every node of the network. ##TSnap::GetTriads2 template <class PGraph> void GetTriads(const PGraph& Graph, TIntTrV& NIdCOTriadV, int SampleNodes=-1); /// Counts the number of edges that participate in at least one triad. ##TSnap::GetTriadEdges template <class PGraph> int GetTriadEdges(const PGraph& Graph, int SampleEdges=-1); /// Returns the number of undirected triads a node \c NId participates in. ##TSnap::GetNodeTriads template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId); /// Returns number of Open and Closed triads a node \c NId participates in. ##TSnap::GetNodeTriads1 template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId, int& ClosedNTriadsX, int& OpenNTriadsX); /// Returns number of Open and Closed triads a node \c NId participates in. ##TSnap::GetNodeTriadsAll template <class PGraph> int GetNodeTriadsAll(const PGraph& Graph, const int& NId, int& ClosedNTriadsX, int& OpenNTriadsX); /// Returns the number of triads between a node \c NId and a subset of its neighbors \c GroupSet. ##TSnap::GetNodeTriads3 template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId, const TIntSet& GroupSet, int& InGroupEdgesX, int& InOutGroupEdgesX, int& OutGroupEdgesX); /// Triangle Participation Ratio: For each node counts how many triangles it participates in and then returns a set of pairs (number of triangles, number of such nodes). 
##TSnap::GetTriadParticip template <class PGraph> void GetTriadParticip(const PGraph& Graph, TIntPrV& TriadCntV); /// Returns a number of shared neighbors between a pair of nodes NId1 and NId2. template<class PGraph> int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2); /// Returns the shared neighbors between a pair of nodes NId1 and NId2. template<class PGraph> int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV); /// Returns the number of length 2 directed paths between a pair of nodes NId1, NId2 (NId1 --> U --> NId2). template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2); /// Returns the 2 directed paths between a pair of nodes NId1, NId2 (NId1 --> U --> NId2). ##TSnap::GetLen2Paths template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV); /// Returns the number of triangles in graph \c Graph. template<class PGraph> int64 GetTriangleCnt(const PGraph& Graph); /// Merges neighbors by removing duplicates and produces one sorted vector of neighbors. template<class PGraph> void MergeNbrs(TIntV& NeighbourV, const typename PGraph::TObj::TNodeI& NI); /// Returns sorted vector \c NbrV containing unique in or out neighbors of node \c NId in graph \c Graph template <class PGraph> void GetUniqueNbrV(const PGraph& Graph, const int& NId, TIntV& NbrV); /// Returns the number of common elements in two sorted TInt vectors int GetCommon(TIntV& A, TIntV& B); ///////////////////////////////////////////////// // Implementation template <class PGraph> double GetClustCf(const PGraph& Graph, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); if (NIdCOTriadV.Empty()) { return 0.0; } double SumCcf = 0.0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { const double OpenCnt = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); if (OpenCnt > 0) { SumCcf += NIdCOTriadV[i].Val2() / OpenCnt; } } IAssert(SumCcf>=0); return SumCcf / double(NIdCOTriadV.Len()); } template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); THash<TInt, TFltPr> DegSumCnt; double SumCcf = 0.0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { const int D = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); const double Ccf = D!=0 ? NIdCOTriadV[i].Val2() / double(D) : 0.0; TFltPr& SumCnt = DegSumCnt.AddDat(Graph->GetNI(NIdCOTriadV[i].Val1).GetDeg()); SumCnt.Val1 += Ccf; SumCnt.Val2 += 1; SumCcf += Ccf; } // get average clustering coefficient for each degree DegToCCfV.Gen(DegSumCnt.Len(), 0); for (int d = 0; d < DegSumCnt.Len(); d++) { DegToCCfV.Add(TFltPr(DegSumCnt.GetKey(d).Val, double(DegSumCnt[d].Val1()/DegSumCnt[d].Val2()))); } DegToCCfV.Sort(); return SumCcf / double(NIdCOTriadV.Len()); } template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int64& ClosedTriads, int64& OpenTriads, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); THash<TInt, TFltPr> DegSumCnt; double SumCcf = 0.0; int64 closedTriads = 0; int64 openTriads = 0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { const int D = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); const double Ccf = D!=0 ? 
NIdCOTriadV[i].Val2() / double(D) : 0.0; closedTriads += NIdCOTriadV[i].Val2; openTriads += NIdCOTriadV[i].Val3; TFltPr& SumCnt = DegSumCnt.AddDat(Graph->GetNI(NIdCOTriadV[i].Val1).GetDeg()); SumCnt.Val1 += Ccf; SumCnt.Val2 += 1; SumCcf += Ccf; } // get average clustering coefficient for each degree DegToCCfV.Gen(DegSumCnt.Len(), 0); for (int d = 0; d < DegSumCnt.Len(); d++) { DegToCCfV.Add(TFltPr(DegSumCnt.GetKey(d).Val, DegSumCnt[d].Val1()/DegSumCnt[d].Val2())); } //if(closedTriads/3 > (uint64) TInt::Mx) { WarnNotify(TStr::Fmt("[%s line %d] %g closed triads.\n", __FILE__, __LINE__, float(closedTriads/3)).CStr()); } //if(openTriads > (uint64) TInt::Mx) { WarnNotify(TStr::Fmt("[%s line %d] %g open triads.\n", __FILE__, __LINE__, float(openTriads/3)).CStr()); } ClosedTriads = closedTriads/int64(3); // each triad is counted 3 times OpenTriads = openTriads; DegToCCfV.Sort(); return SumCcf / double(NIdCOTriadV.Len()); } template <class PGraph> double GetClustCfAll(const PGraph& Graph, TFltPrV& DegToCCfV, int64& ClosedTriads, int64& OpenTriads, int SampleNodes) { return GetClustCf(Graph, DegToCCfV, ClosedTriads, OpenTriads, SampleNodes); } template <class PGraph> double GetNodeClustCf(const PGraph& Graph, const int& NId) { int Open, Closed; GetNodeTriads(Graph, NId, Open, Closed); //const double Deg = Graph->GetNI(NId).GetDeg(); return (Open+Closed)==0 ? 0 : double(Open)/double(Open+Closed); } template <class PGraph> void GetNodeClustCf(const PGraph& Graph, TIntFltH& NIdCCfH) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV); NIdCCfH.Clr(false); for (int i = 0; i < NIdCOTriadV.Len(); i++) { const int D = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); const double CCf = D!=0 ? NIdCOTriadV[i].Val2() / double(D) : 0.0; NIdCCfH.AddDat(NIdCOTriadV[i].Val1, CCf); } } template <class PGraph> int64 GetTriads(const PGraph& Graph, int SampleNodes) { int64 OpenTriads, ClosedTriads; return GetTriads(Graph, ClosedTriads, OpenTriads, SampleNodes); } template <class PGraph> int64 GetTriads(const PGraph& Graph, int64& ClosedTriads, int64& OpenTriads, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); uint64 closedTriads = 0; uint64 openTriads = 0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { closedTriads += NIdCOTriadV[i].Val2; openTriads += NIdCOTriadV[i].Val3; } //IAssert(closedTriads/3 < (uint64) TInt::Mx); //IAssert(openTriads < (uint64) TInt::Mx); ClosedTriads = int64(closedTriads/3); // each triad is counted 3 times OpenTriads = int64(openTriads); return ClosedTriads; } template <class PGraph> int64 GetTriadsAll(const PGraph& Graph, int64& ClosedTriads, int64& OpenTriads, int SampleNodes) { return GetTriads(Graph, ClosedTriads, OpenTriads, SampleNodes); } // Function pretends that the graph is undirected (count unique connected triples of nodes) // This implementation is slower, it uses hash tables directly template <class PGraph> void GetTriads_v0(const PGraph& Graph, TIntTrV& NIdCOTriadV, int SampleNodes) { const bool IsDir = Graph->HasFlag(gfDirected); TIntSet NbrH; TIntV NIdV; TRnd Rnd(0); Graph->GetNIdV(NIdV); NIdV.Shuffle(Rnd); if (SampleNodes == -1) { SampleNodes = Graph->GetNodes(); } NIdCOTriadV.Clr(false); NIdCOTriadV.Reserve(SampleNodes); for (int node = 0; node < SampleNodes; node++) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NIdV[node]); if (NI.GetDeg() < 2) { NIdCOTriadV.Add(TIntTr(NI.GetId(), 0, 0)); // zero triangles continue; } // find neighborhood NbrH.Clr(false); for (int e = 0; e < NI.GetOutDeg(); e++) { if (NI.GetOutNId(e) != NI.GetId()) { 
NbrH.AddKey(NI.GetOutNId(e)); } } if (IsDir) { for (int e = 0; e < NI.GetInDeg(); e++) { if (NI.GetInNId(e) != NI.GetId()) { NbrH.AddKey(NI.GetInNId(e)); } } } // count connected neighbors int OpenCnt=0, CloseCnt=0; for (int srcNbr = 0; srcNbr < NbrH.Len(); srcNbr++) { const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NbrH.GetKey(srcNbr)); for (int dstNbr = srcNbr+1; dstNbr < NbrH.Len(); dstNbr++) { const int dstNId = NbrH.GetKey(dstNbr); if (SrcNode.IsNbrNId(dstNId)) { CloseCnt++; } // is edge else { OpenCnt++; } } } IAssert(2*(OpenCnt+CloseCnt) == NbrH.Len()*(NbrH.Len()-1)); NIdCOTriadV.Add(TIntTr(NI.GetId(), CloseCnt, OpenCnt)); } } // Function pretends that the graph is undirected (count unique connected triples of nodes) // This implementation is faster, it converts hash tables to vectors template <class PGraph> void GetTriads(const PGraph& Graph, TIntTrV& NIdCOTriadV, int SampleNodes) { const bool IsDir = Graph->HasFlag(gfDirected); TIntSet NbrH; TIntV NIdV; //TRnd Rnd(0); TRnd Rnd(1); int NNodes; TIntV Nbrs; int NId; int64 hcount; hcount = 0; NNodes = Graph->GetNodes(); Graph->GetNIdV(NIdV); NIdV.Shuffle(Rnd); if (SampleNodes == -1) { SampleNodes = NNodes; } int MxId = -1; for (int i = 0; i < NNodes; i++) { if (NIdV[i] > MxId) { MxId = NIdV[i]; } } TVec<TIntV> NbrV(MxId + 1); if (IsDir) { // get in and out neighbors for (int node = 0; node < NNodes; node++) { int NId = NIdV[node]; NbrV[NId] = TIntV(); GetUniqueNbrV(Graph, NId, NbrV[NId]); } } else { // get only out neighbors for (int node = 0; node < NNodes; node++) { int NId = NIdV[node]; typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); NbrV[NId] = TIntV(); NbrV[NId].Reserve(NI.GetOutDeg()); NbrV[NId].Reduce(0); for (int i = 0; i < NI.GetOutDeg(); i++) { NbrV[NId].Add(NI.GetOutNId(i)); } } } NIdCOTriadV.Clr(false); NIdCOTriadV.Reserve(SampleNodes); for (int node = 0; node < SampleNodes; node++) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NIdV[node]); int NLen; NId = NI.GetId(); hcount++; if (NI.GetDeg() < 2) { NIdCOTriadV.Add(TIntTr(NId, 0, 0)); // zero triangles continue; } Nbrs = NbrV[NId]; NLen = Nbrs.Len(); // count connected neighbors int OpenCnt1 = 0, CloseCnt1 = 0; for (int srcNbr = 0; srcNbr < NLen; srcNbr++) { int Count = GetCommon(NbrV[NbrV[NId][srcNbr]],Nbrs); CloseCnt1 += Count; } CloseCnt1 /= 2; OpenCnt1 = (NLen*(NLen-1))/2 - CloseCnt1; NIdCOTriadV.Add(TIntTr(NId, CloseCnt1, OpenCnt1)); } } #if 0 // OP RS 2016/08/25, this is an alternative implementation of GetTriangleCnt() template<class PGraph> int64 CountTriangles(const PGraph& Graph) { THash<TInt, TInt> H; TIntV MapV; int ind = 0; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { H.AddDat(NI.GetId(), ind); MapV.Add(NI.GetId()); ind += 1; } TVec<TIntV> HigherDegNbrV(ind); #ifdef USE_OPENMP #pragma omp parallel for schedule(dynamic) #endif for (int i = 0; i < ind; i++) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(MapV[i]); TIntV NbrV; MergeNbrs<PGraph>(NbrV, NI); TIntV V; for (int j = 0; j < NbrV.Len(); j++) { TInt Vert = NbrV[j]; TInt Deg = Graph->GetNI(Vert).GetDeg(); if (Deg > NI.GetDeg() || (Deg == NI.GetDeg() && Vert > NI.GetId())) { V.Add(Vert); } } HigherDegNbrV[i] = V; } int64 cnt = 0; #ifdef USE_OPENMP #pragma omp parallel for schedule(dynamic) reduction(+:cnt) #endif for (int i = 0; i < HigherDegNbrV.Len(); i++) { for (int j = 0; j < HigherDegNbrV[i].Len(); j++) { TInt NbrInd = H.GetDat(HigherDegNbrV[i][j]); int64 num = GetCommon(HigherDegNbrV[i], HigherDegNbrV[NbrInd]); cnt += num; } } return cnt; 
} #endif template<class PGraph> int64 GetTriangleCnt(const PGraph& Graph) { const int NNodes = Graph->GetNodes(); TIntV MapV(NNodes); TVec<typename PGraph::TObj::TNodeI> NV(NNodes); NV.Reduce(0); int MxId = -1; int ind = 0; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { NV.Add(NI); int Id = NI.GetId(); if (Id > MxId) { MxId = Id; } MapV[ind] = Id; ind++; } TIntV IndV(MxId+1); for (int j = 0; j < NNodes; j++) { IndV[MapV[j]] = j; } ind = MapV.Len(); TVec<TIntV> HigherDegNbrV(ind); for (int i = 0; i < ind; i++) { HigherDegNbrV[i] = TVec<TInt>(); HigherDegNbrV[i].Reserve(NV[i].GetDeg()); HigherDegNbrV[i].Reduce(0); } #ifdef USE_OPENMP #pragma omp parallel for schedule(dynamic) #endif for (int i = 0; i < ind; i++) { typename PGraph::TObj::TNodeI NI = NV[i]; MergeNbrs<PGraph>(HigherDegNbrV[i], NI); int k = 0; for (int j = 0; j < HigherDegNbrV[i].Len(); j++) { TInt Vert = HigherDegNbrV[i][j]; TInt Deg = NV[IndV[Vert]].GetDeg(); if (Deg > NI.GetDeg() || (Deg == NI.GetDeg() && Vert > NI.GetId())) { HigherDegNbrV[i][k] = Vert; k++; } } HigherDegNbrV[i].Reduce(k); } int64 cnt = 0; #ifdef USE_OPENMP #pragma omp parallel for schedule(dynamic) reduction(+:cnt) #endif for (int i = 0; i < HigherDegNbrV.Len(); i++) { for (int j = 0; j < HigherDegNbrV[i].Len(); j++) { TInt NbrInd = IndV[HigherDegNbrV[i][j]]; int64 num = GetCommon(HigherDegNbrV[i], HigherDegNbrV[NbrInd]); cnt += num; } } return cnt; } template<class PGraph> void MergeNbrs(TIntV& NeighbourV, const typename PGraph::TObj::TNodeI& NI) { int j = 0; int k = 0; int prev = -1; int indeg = NI.GetInDeg(); int outdeg = NI.GetOutDeg(); if (indeg > 0 && outdeg > 0) { int v1 = NI.GetInNId(j); int v2 = NI.GetOutNId(k); while (1) { if (v1 <= v2) { if (prev != v1) { NeighbourV.Add(v1); prev = v1; } j += 1; if (j >= indeg) { break; } v1 = NI.GetInNId(j); } else { if (prev != v2) { NeighbourV.Add(v2); prev = v2; } k += 1; if (k >= outdeg) { break; } v2 = NI.GetOutNId(k); } } } while (j < indeg) { int v = NI.GetInNId(j); if (prev != v) { NeighbourV.Add(v); prev = v; } j += 1; } while (k < outdeg) { int v = NI.GetOutNId(k); if (prev != v) { NeighbourV.Add(v); prev = v; } k += 1; } } // Count the number of edges that participate in at least one triad template <class PGraph> int GetTriadEdges(const PGraph& Graph, int SampleEdges) { const bool IsDir = Graph->HasFlag(gfDirected); TIntSet NbrH; int TriadEdges = 0; for(typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { NbrH.Clr(false); for (int e = 0; e < NI.GetOutDeg(); e++) { if (NI.GetOutNId(e) != NI.GetId()) { NbrH.AddKey(NI.GetOutNId(e)); } } if (IsDir) { for (int e = 0; e < NI.GetInDeg(); e++) { if (NI.GetInNId(e) != NI.GetId()) { NbrH.AddKey(NI.GetInNId(e)); } } } for (int e = 0; e < NI.GetOutDeg(); e++) { if (!IsDir && NI.GetId()<NI.GetOutNId(e)) { continue; } // for undirected graphs count each edge only once const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NI.GetOutNId(e)); bool Triad=false; for (int e1 = 0; e1 < SrcNode.GetOutDeg(); e1++) { if (NbrH.IsKey(SrcNode.GetOutNId(e1))) { Triad=true; break; } } if (IsDir && ! 
Triad) { for (int e1 = 0; e1 < SrcNode.GetInDeg(); e1++) { if (NbrH.IsKey(SrcNode.GetInNId(e1))) { Triad=true; break; } } } if (Triad) { TriadEdges++; } } } return TriadEdges; } // Returns the number of closed triads (triangles) a node participates in template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId) { int ClosedTriads=0, OpenTriads=0; return GetNodeTriads(Graph, NId, ClosedTriads, OpenTriads); } // Counts the closed and open triads a node participates in; the return value is the closed count template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId, int& ClosedTriads, int& OpenTriads) { const typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); ClosedTriads=0; OpenTriads=0; if (NI.GetDeg() < 2) { return 0; } // find neighborhood TIntSet NbrSet(NI.GetDeg()); for (int e = 0; e < NI.GetOutDeg(); e++) { if (NI.GetOutNId(e) != NI.GetId()) { // exclude self edges NbrSet.AddKey(NI.GetOutNId(e)); } } if (Graph->HasFlag(gfDirected)) { for (int e = 0; e < NI.GetInDeg(); e++) { if (NI.GetInNId(e) != NI.GetId()) { // exclude self edges NbrSet.AddKey(NI.GetInNId(e)); } } } // count connected neighbors for (int srcNbr = 0; srcNbr < NbrSet.Len(); srcNbr++) { const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NbrSet.GetKey(srcNbr)); for (int dstNbr = srcNbr+1; dstNbr < NbrSet.Len(); dstNbr++) { const int dstNId = NbrSet.GetKey(dstNbr); if (SrcNode.IsNbrNId(dstNId)) { ClosedTriads++; } else { OpenTriads++; } } } return ClosedTriads; } template <class PGraph> int GetNodeTriadsAll(const PGraph& Graph, const int& NId, int& ClosedTriads, int& OpenTriads) { return GetNodeTriads(Graph, NId, ClosedTriads, OpenTriads); } // Node NId and a subset of its neighbors GroupSet // InGroupEdges ... triads (NId, g1, g2), where g1 and g2 are in GroupSet // InOutGroupEdges ... triads (NId, g1, o1), where g1 in GroupSet and o1 not in GroupSet // OutGroupEdges ...
triads (NId, o1, o2), where o1 and o2 are not in GroupSet template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId, const TIntSet& GroupSet, int& InGroupEdges, int& InOutGroupEdges, int& OutGroupEdges) { const typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); const bool IsDir = Graph->HasFlag(gfDirected); InGroupEdges=0; InOutGroupEdges=0; OutGroupEdges=0; if (NI.GetDeg() < 2) { return 0; } // find neighborhood TIntSet NbrSet(NI.GetDeg()); for (int e = 0; e < NI.GetOutDeg(); e++) { if (NI.GetOutNId(e) != NI.GetId()) { // exclude self edges NbrSet.AddKey(NI.GetOutNId(e)); } } if (IsDir) { for (int e = 0; e < NI.GetInDeg(); e++) { if (NI.GetInNId(e) != NI.GetId()) { NbrSet.AddKey(NI.GetInNId(e)); } } } // count connected neighbors for (int srcNbr = 0; srcNbr < NbrSet.Len(); srcNbr++) { const int NbrId = NbrSet.GetKey(srcNbr); const bool NbrIn = GroupSet.IsKey(NbrId); const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NbrId); for (int dstNbr = srcNbr+1; dstNbr < NbrSet.Len(); dstNbr++) { const int DstNId = NbrSet.GetKey(dstNbr); if (SrcNode.IsNbrNId(DstNId)) { // triad (NId, NbrId, DstNid) bool DstIn = GroupSet.IsKey(DstNId); if (NbrIn && DstIn) { InGroupEdges++; } else if (NbrIn || DstIn) { InOutGroupEdges++; } else { OutGroupEdges++; } } } } return InGroupEdges; } // For each node count how many triangles it participates in template <class PGraph> void GetTriadParticip(const PGraph& Graph, TIntPrV& TriadCntV) { TIntH TriadCntH; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { const int Triads = GetNodeTriads(Graph, NI.GetId()); TriadCntH.AddDat(Triads) += 1; } TriadCntH.GetKeyDatPrV(TriadCntV); TriadCntV.Sort(); } template<class PGraph> int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2) { TIntV NbrV; return GetCmnNbrs(Graph, NId1, NId2, NbrV); } // Get common neighbors between a pair of nodes (undirected) template<class PGraph> int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV) { if (! Graph->IsNode(NId1) || ! Graph->IsNode(NId2)) { NbrV.Clr(false); return 0; } typename PGraph::TObj::TNodeI NI1 = Graph->GetNI(NId1); typename PGraph::TObj::TNodeI NI2 = Graph->GetNI(NId2); NbrV.Clr(false); NbrV.Reserve(TMath::Mn(NI1.GetDeg(), NI2.GetDeg())); TIntSet NSet1(NI1.GetDeg()), NSet2(NI2.GetDeg()); for (int i = 0; i < NI1.GetDeg(); i++) { const int nid = NI1.GetNbrNId(i); if (nid!=NId1 && nid!=NId2) { NSet1.AddKey(nid); } } for (int i = 0; i < NI2.GetDeg(); i++) { const int nid = NI2.GetNbrNId(i); if (NSet1.IsKey(nid)) { NSet2.AddKey(nid); } } NSet2.GetKeyV(NbrV); return NbrV.Len(); } template<> inline int GetCmnNbrs<PUNGraph>(const PUNGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV) { if (! Graph->IsNode(NId1) || ! 
Graph->IsNode(NId2)) { NbrV.Clr(false); return 0; } const TUNGraph::TNodeI NI1 = Graph->GetNI(NId1); const TUNGraph::TNodeI NI2 = Graph->GetNI(NId2); int i=0, j=0; NbrV.Clr(false); NbrV.Reserve(TMath::Mn(NI1.GetDeg(), NI2.GetDeg())); while (i < NI1.GetDeg() && j < NI2.GetDeg()) { const int nid = NI1.GetNbrNId(i); while (j < NI2.GetDeg() && NI2.GetNbrNId(j) < nid) { j++; } if (j < NI2.GetDeg() && nid==NI2.GetNbrNId(j) && nid!=NId1 && nid!=NId2) { IAssert(NbrV.Empty() || NbrV.Last() < nid); NbrV.Add(nid); j++; } i++; } return NbrV.Len(); } // get number of length 2 directed paths between a pair of nodes // for a pair of nodes (i,j): |{u: (i,u) and (u,j) }| template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2) { TIntV NbrV; return GetLen2Paths(Graph, NId1, NId2, NbrV); } // get number of length 2 directed paths between a pair of nodes // for a pair of nodes (i,j): {u: (i,u) and (u,j) } template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV) { const typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId1); NbrV.Clr(false); NbrV.Reserve(NI.GetOutDeg()); for (int e = 0; e < NI.GetOutDeg(); e++) { const typename PGraph::TObj::TNodeI MidNI = Graph->GetNI(NI.GetOutNId(e)); if (MidNI.IsOutNId(NId2)) { NbrV.Add(MidNI.GetId()); } } return NbrV.Len(); } template <class PGraph> void GetUniqueNbrV(const PGraph& Graph, const int& NId, TIntV& NbrV) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); NbrV.Reserve(NI.GetDeg()); NbrV.Reduce(0); int j = 0; int k = 0; int Prev = -1; int InDeg = NI.GetInDeg(); int OutDeg = NI.GetOutDeg(); if (InDeg > 0 && OutDeg > 0) { int v1 = NI.GetInNId(j); int v2 = NI.GetOutNId(k); while (1) { if (v1 <= v2) { if (Prev != v1) { if (v1 != NId) { NbrV.Add(v1); Prev = v1; } } j += 1; if (j >= InDeg) { break; } v1 = NI.GetInNId(j); } else { if (Prev != v2) { if (v2 != NId) { NbrV.Add(v2); } Prev = v2; } k += 1; if (k >= OutDeg) { break; } v2 = NI.GetOutNId(k); } } } while (j < InDeg) { int v = NI.GetInNId(j); if (Prev != v) { if (v != NId) { NbrV.Add(v); } Prev = v; } j += 1; } while (k < OutDeg) { int v = NI.GetOutNId(k); if (Prev != v) { if (v != NId) { NbrV.Add(v); } Prev = v; } k += 1; } } }; // namespace TSnap ///////////////////////////////////////////////// // Node and Edge Network Constraint (by Ron Burt) // works for directed and undirected graphs (but not for multigraphs) template <class PGraph> class TNetConstraint { public: PGraph Graph; THash<TIntPr, TFlt> NodePrCH; // pairs of nodes that have non-zero network constraint public: TNetConstraint(const PGraph& GraphPt, const bool& CalcaAll=true); int Len() const { return NodePrCH.Len(); } double GetC(const int& ConstraintN) const { return NodePrCH[ConstraintN]; } TIntPr GetNodePr(const int& ConstraintN) const { return NodePrCH.GetKey(ConstraintN); } double GetEdgeC(const int& NId1, const int& NId2) const; double GetNodeC(const int& NId) const; void AddConstraint(const int& NId1, const int& NId2); void CalcConstraints(); void CalcConstraints(const int& NId); void Dump() const; static void Test(); }; template <class PGraph> TNetConstraint<PGraph>::TNetConstraint(const PGraph& GraphPt, const bool& CalcaAll) : Graph(GraphPt) { CAssert(!
HasGraphFlag(typename PGraph::TObj, gfMultiGraph)); // must not be multigraph if (CalcaAll) { CalcConstraints(); } } template <class PGraph> double TNetConstraint<PGraph>::GetEdgeC(const int& NId1, const int& NId2) const { if (NodePrCH.IsKey(TIntPr(NId1, NId2))) { return NodePrCH.GetDat(TIntPr(NId1, NId2)); } else { return 0.0; } } template <class PGraph> double TNetConstraint<PGraph>::GetNodeC(const int& NId) const { typename PGraph::TObj::TNodeI NI1 = Graph->GetNI(NId); if (NI1.GetOutDeg() == 0) { return 0.0; } int KeyId = -1; for (int k = 0; k<NI1.GetOutDeg(); k++) { KeyId = NodePrCH.GetKeyId(TIntPr(NI1.GetId(), NI1.GetOutNId(k))); if (KeyId > -1) { break; } } if (KeyId < 0) { return 0.0; } double Constraint = NodePrCH[KeyId]; for (int i = KeyId-1; i >-1 && NodePrCH.GetKey(i).Val1()==NId; i--) { Constraint += NodePrCH[i]; } for (int i = KeyId+1; i < NodePrCH.Len() && NodePrCH.GetKey(i).Val1()==NId; i++) { Constraint += NodePrCH[i]; } return Constraint; } template <class PGraph> void TNetConstraint<PGraph>::AddConstraint(const int& NId1, const int& NId2) { if (NId1==NId2 || NodePrCH.IsKey(TIntPr(NId1, NId2))) { return; } typename PGraph::TObj::TNodeI NI1 = Graph->GetNI(NId1); double Constraint = 0.0; if (NI1.IsOutNId(NId2)) { // is direct edge Constraint += 1.0/(double) NI1.GetOutDeg(); } const double SrcC = 1.0/(double) NI1.GetOutDeg(); for (int e = 0; e < NI1.GetOutDeg(); e++) { const int MidNId = NI1.GetOutNId(e); if (MidNId == NId1 || MidNId == NId2) { continue; } const typename PGraph::TObj::TNodeI MidNI = Graph->GetNI(MidNId); if (MidNI.IsOutNId(NId2)) { Constraint += SrcC * (1.0/(double)MidNI.GetOutDeg()); } } if (Constraint==0) { return; } Constraint = TMath::Sqr(Constraint); NodePrCH.AddDat(TIntPr(NId1, NId2), Constraint); } template <class PGraph> void TNetConstraint<PGraph>::CalcConstraints() { // add edges for (typename PGraph::TObj::TEdgeI EI = Graph->BegEI(); EI < Graph->EndEI(); EI++) { AddConstraint(EI.GetSrcNId(), EI.GetDstNId()); AddConstraint(EI.GetDstNId(), EI.GetSrcNId()); } // add open triads for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { for (int i = 0; i < NI.GetDeg(); i++) { const int NId1 = NI.GetNbrNId(i); for (int j = 0; j < NI.GetDeg(); j++) { const int NId2 = NI.GetNbrNId(j); AddConstraint(NId1, NId2); } } } NodePrCH.SortByKey(); } // calculate constraints around a node id template <class PGraph> void TNetConstraint<PGraph>::CalcConstraints(const int& NId) { typename PGraph::TObj::TNodeI StartNI = Graph->GetNI(NId); TIntSet SeenSet; for (int e = 0; e < StartNI.GetOutDeg(); e++) { typename PGraph::TObj::TNodeI MidNI = Graph->GetNI(StartNI.GetOutNId(e)); AddConstraint(NId, MidNI.GetId()); for (int i = 0; i < MidNI.GetOutDeg(); i++) { const int EndNId = MidNI.GetOutNId(i); if (! SeenSet.IsKey(EndNId)) { AddConstraint(NId, EndNId); SeenSet.AddKey(EndNId); } } } } template <class PGraph> void TNetConstraint<PGraph>::Dump() const { printf("Edge network constraint: (%d, %d)\n", Graph->GetNodes(), Graph->GetEdges()); for (int e = 0; e < NodePrCH.Len(); e++) { printf(" %4d %4d : %f\n", NodePrCH.GetKey(e).Val1(), NodePrCH.GetKey(e).Val2(), NodePrCH[e].Val); } printf("\n"); } // example from page 56 of Structural Holes by Ronald S. 
Burt // (http://www.amazon.com/Structural-Holes-Social-Structure-Competition/dp/0674843711) template <class PGraph> void TNetConstraint<PGraph>::Test() { PUNGraph G = TUNGraph::New(); G->AddNode(0); G->AddNode(1); G->AddNode(2); G->AddNode(3); G->AddNode(4); G->AddNode(5); G->AddNode(6); G->AddEdge(0,1); G->AddEdge(0,2); G->AddEdge(0,3); G->AddEdge(0,4); G->AddEdge(0,5); G->AddEdge(0,6); G->AddEdge(1,2); G->AddEdge(1,5); G->AddEdge(1,6); G->AddEdge(2,4); TNetConstraint<PUNGraph> NetConstraint(G, true); // NetConstraint.CalcConstraints(0); NetConstraint.Dump(); printf("middle node network constraint: %f\n", NetConstraint.GetNodeC(0)); } #endif // TRIAD_H
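// A minimal usage sketch (kept as a comment so including triad.h stays
// side-effect free). It composes the routines above on a small undirected
// graph; the calls used (TUNGraph::New, AddNode, AddEdge, TSnap::GetTriads,
// TSnap::GetClustCf) all appear in this header or in TNetConstraint::Test()
// above, and the expected counts are worked out by hand for this graph.
//
//   PUNGraph G = TUNGraph::New();
//   for (int n = 0; n < 5; n++) { G->AddNode(n); }
//   G->AddEdge(0,1); G->AddEdge(1,2); G->AddEdge(0,2);   // triangle 0-1-2
//   G->AddEdge(2,3); G->AddEdge(3,4);                    // a path hanging off node 2
//   int64 ClosedTr, OpenTr;
//   TSnap::GetTriads(G, ClosedTr, OpenTr, -1);           // ClosedTr == 1, OpenTr == 3
//   const double AvgCcf = TSnap::GetClustCf(G, -1);      // (1 + 1 + 1/3 + 0 + 0)/5
//   printf("closed=%lld open=%lld avg ccf=%f\n",
//          (long long)ClosedTr, (long long)OpenTr, AvgCcf);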
FourierTransform.h
/* Copyright 2016 Kristofer Björnson * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @package TBTKcalc * @file FourierTransform.h * @brief Fourier transform * * @author Kristofer Björnson */ #ifndef COM_DAFER45_TBTK_FOURIER_TRANSFORM #define COM_DAFER45_TBTK_FOURIER_TRANSFORM #include "TBTK/CArray.h" #include "TBTK/Index.h" #include <fftw3.h> #include <complex> #include <vector> namespace TBTK{ /** @brief Fourier transform * * # Example * \snippet FourierTransform/FourierTransform.cpp FourierTransform * ## Output * \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransform1D.png * \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransform2DReal.png * \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransform2DImaginary.png * \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransformWithPlan.png */ class FourierTransform{ public: /** Plan for executing the Fourier-transform. */ template<typename DataType> class Plan{ public: /** Constructor. */ Plan( const CArray<DataType> &in, CArray<DataType> &out, const std::vector<unsigned int> &ranges, int sign ); /** Copy constructor. */ Plan(const Plan &plan) = delete; /** Move constructor. */ Plan(Plan &&plan); /** Destructor. */ ~Plan(); /** Assignment operator. */ Plan& operator=(const Plan &plan) = delete; /** Move assignment operator. */ Plan& operator=(Plan &&plan); /** Set normalization factor. */ void setNormalizationFactor(double normalizationFactor); /** Get the normalization factor. */ double getNormalizationFactor() const; private: /** FFTW3 plan. */ fftw_plan *plan; /** Normalization factor. */ double normalizationFactor; /** Data size. */ unsigned int size; /** Input data. */ const CArray<DataType> &input; /** Output data. */ CArray<DataType> &output; /** Get FFTW3 plan. */ fftw_plan& getFFTWPlan(); /** Get data size. */ unsigned int getSize() const; /** Get input data. */ const CArray<DataType>& getInput() const; /** Get output data. */ CArray<DataType>& getOutput(); /** Make FourierTransform a friend class. */ friend class FourierTransform; }; /** Plan for executing forward Fourier-transform. */ template<typename DataType> class ForwardPlan : public Plan<DataType>{ public: /** Constructor. */ ForwardPlan( const CArray<DataType> &in, CArray<DataType> &out, const std::vector<unsigned int> &ranges ) : Plan<DataType>(in, out, ranges, -1){} }; /** Plan for executing inverse Fourier-transform. */ template<typename DataType> class InversePlan : public Plan<DataType>{ public: /** Constructor. */ InversePlan( const CArray<DataType> &in, CArray<DataType> &out, const std::vector<unsigned int> &ranges ) : Plan<DataType>(in, out, ranges, 1 ){} }; /** N-dimensional complex Fourier transform. * * @param in Input data. * @param out Output data. * @param ranges The dimensions of the data. * @param sign The sign to use in the exponent of the Fourier * transform. */ static void transform( const CArray<std::complex<double>> &in, CArray<std::complex<double>> &out, const std::vector<unsigned int> &ranges, int sign ); /** Execute a planned transform. * * @param plan The plan to execute. */ template<typename DataType> static void transform(Plan<DataType> &plan); /** N-dimensional complex forward Fourier transform. * * @param in Input data. * @param out Output data. * @param ranges The dimensions of the data. */ static void forward( const CArray<std::complex<double>> &in, CArray<std::complex<double>> &out, const std::vector<unsigned int> &ranges ); /** N-dimensional complex inverse Fourier transform. * * @param in Input data. * @param out Output data. * @param ranges The dimensions of the data. */ static void inverse( const CArray<std::complex<double>> &in, CArray<std::complex<double>> &out, const std::vector<unsigned int> &ranges ); private: }; template<typename DataType> inline void FourierTransform::transform(Plan<DataType> &plan){ fftw_execute(plan.getFFTWPlan()); double normalizationFactor = plan.getNormalizationFactor(); if(normalizationFactor != 1.){ CArray<DataType> &output = plan.getOutput(); for(unsigned int n = 0; n < plan.getSize(); n++) output[n] /= normalizationFactor; } } inline void FourierTransform::forward( const CArray<std::complex<double>> &in, CArray<std::complex<double>> &out, const std::vector<unsigned int> &ranges ){ transform(in, out, ranges, -1); } inline void FourierTransform::inverse( const CArray<std::complex<double>> &in, CArray<std::complex<double>> &out, const std::vector<unsigned int> &ranges ){ transform(in, out, ranges, 1); } template<typename DataType> inline FourierTransform::Plan<DataType>::Plan(Plan &&plan) : plan(plan.plan), normalizationFactor(plan.normalizationFactor), size(plan.size), input(plan.input), output(plan.output){ // Take over the FFTW plan; the moved-from object must not destroy it. // The reference members input and output are bound in the initializer list, // since references cannot be assigned in the constructor body. plan.plan = nullptr; } template<typename DataType> inline FourierTransform::Plan<DataType>::~Plan(){ if(plan != nullptr){ #pragma omp critical (TBTK_FOURIER_TRANSFORM) fftw_destroy_plan(*plan); delete plan; } } template<typename DataType> inline FourierTransform::Plan<DataType>& FourierTransform::Plan< DataType >::operator=(Plan &&rhs){ if(this != &rhs){ if(this->plan != nullptr){ #pragma omp critical (TBTK_FOURIER_TRANSFORM) fftw_destroy_plan(*this->plan); delete this->plan; } // Transfer ownership of the FFTW plan and null it in rhs so the // moved-from destructor does not destroy it a second time. this->plan = rhs.plan; rhs.plan = nullptr; normalizationFactor = rhs.normalizationFactor; size = rhs.size; // input and output are references and cannot be reseated; they keep // referring to the buffers this Plan was constructed with. } return *this; } template<typename DataType> inline void FourierTransform::Plan<DataType>::setNormalizationFactor( double normalizationFactor ){ this->normalizationFactor = normalizationFactor; } template<typename DataType> inline double FourierTransform::Plan<DataType>::getNormalizationFactor() const{ return normalizationFactor; } template<typename DataType> inline fftw_plan& FourierTransform::Plan<DataType>::getFFTWPlan(){ return *plan; } template<typename DataType> inline unsigned int FourierTransform::Plan<DataType>::getSize() const{ return size; } template<typename DataType> inline const CArray<DataType>& FourierTransform::Plan<DataType>::getInput() const{ return input; } template<typename DataType> inline CArray<DataType>& FourierTransform::Plan<DataType>::getOutput(){ return output; } }; //End of namespace TBTK #endif
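// A minimal usage sketch (kept as a comment). It assumes that
// CArray<std::complex<double>> exposes a size constructor and operator[],
// consistent with how FourierTransform::transform(Plan&) above indexes
// plan.getOutput(); treat the construction line as an assumption about the
// CArray API rather than a given. <cmath> and M_PI are assumed available.
//
//   const unsigned int N = 8;
//   TBTK::CArray<std::complex<double>> in(N), out(N);
//   for(unsigned int n = 0; n < N; n++)
//       in[n] = std::complex<double>(cos(2*M_PI*n/(double)N), 0);
//   TBTK::FourierTransform::forward(in, out, {N});   // 1D transform, sign = -1
//   TBTK::FourierTransform::inverse(out, in, {N});   // sign = +1, maps back
//
// For repeated transforms over the same buffers, construct a ForwardPlan once,
// optionally call setNormalizationFactor() on it, and invoke
// FourierTransform::transform(plan) in the loop so FFTW planning is paid once.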
util.c
#include "compiler.h" #include "util.h" void * mymalloc(size_t bytes) { void * ptr = NULL; int rc = posix_memalign(&ptr, alignment, bytes); if (rc != 0 || ptr == NULL) abort(); return ptr; } size_t compare_doubles(size_t n, const double * RESTRICT x, const double * RESTRICT y) { size_t errors = 0; #pragma omp parallel for reduction(+:errors) for (size_t i=0; i<n; i++) { if (x[i] != y[i]) errors++; } return errors; } size_t compare_doubles_stride(size_t n, const double * RESTRICT x, const double * RESTRICT y, int stride) { size_t errors = 0; #pragma omp parallel for reduction(+:errors) for (size_t i=0; i<n; i+=stride) { if (x[i] != y[i]) errors++; } return errors; } size_t compare_doubles_stride_holes(size_t n, const double * RESTRICT x, const double * RESTRICT y, int stride, double val) { size_t errors = 0; #pragma omp parallel for reduction(+:errors) for (size_t i=0; i<n; i+=stride) { /* check the part that is copied */ if (y[i] != x[i]) errors++; /* between the strides, elements should not change */ for (int s=1; s<stride && i+s<n; s++) { if (y[i+s] != val) errors++; } } return errors; } void init_doubles(size_t n, double * RESTRICT x) { #pragma omp parallel for for (size_t i=0; i<n; i++) { x[i] = (double)i; } } void set_doubles(size_t n, double value, double * RESTRICT x) { #pragma omp parallel for for (size_t i=0; i<n; i++) { x[i] = value; } } void print_doubles_1(size_t n, const double * RESTRICT x) { for (size_t i=0; i<n; i++) { printf("%zu %lf\n", i, x[i]); } fflush(stdout); } void print_doubles_2(size_t n, const double * RESTRICT x, const double * RESTRICT y) { for (size_t i=0; i<n; i++) { printf("%zu %lf %lf\n", i, x[i], y[i]); } fflush(stdout); } void print_compare_doubles_stride_holes(size_t n, const double * RESTRICT x, const double * RESTRICT y, int stride, double val) { for (size_t i=0; i<n; i+=stride) { printf("%zu %lf %lf %s\n", i, y[i], x[i], (y[i]==x[i]) ? "" : "ERROR"); for (int s=1; s<stride && i+s<n; s++) { printf("%zu %lf %lf %s\n", i+s, y[i+s], val, (y[i+s]==val) ? "" : "ERROR"); } } fflush(stdout); }
ChMatrix.h
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Alessandro Tasora, Radu Serban // ============================================================================= #ifndef CHMATRIX_H #define CHMATRIX_H #include "chrono/core/ChCoordsys.h" #include "chrono/core/ChException.h" #include "chrono/ChConfig.h" #include "chrono/serialization/ChArchive.h" #include "chrono/serialization/ChArchiveAsciiDump.h" #if defined(CHRONO_HAS_SSE) || defined(CHRONO_HAS_AVX) #include <immintrin.h> #endif #if defined(CHRONO_HAS_NEON) #include <arm_neon.h> #endif namespace chrono { // // FAST MACROS TO SPEEDUP CODE // #define Set33Element(a, b, val) SetElementN(((a * 3) + (b)), val) #define Get33Element(a, b) GetElementN((a * 3) + (b)) #define Set34Element(a, b, val) SetElementN(((a * 4) + (b)), val) #define Get34Element(a, b) GetElementN((a * 4) + (b)) #define Set34Row(ma, a, val0, val1, val2, val3) \ ma.SetElementN((a * 4), val0); \ ma.SetElementN((a * 4) + 1, val1); \ ma.SetElementN((a * 4) + 2, val2); \ ma.SetElementN((a * 4) + 3, val3); #define Set44Element(a, b, val) SetElementN(((a * 4) + (b)), val) #define Get44Element(a, b) GetElementN((a * 4) + (b)) // forward declaration template <class Real = double> class ChMatrixDynamic; /// /// ChMatrix: /// /// A base class for matrix objects (tables of NxM numbers). /// To access elements, the indexes start from zero, and /// you must indicate first row, then column, that is: m(2,4) /// means the element at 3rd row, 5th column. /// This is an abstract class, so you cannot instantiate /// objects from it: you must rather create matrices using the /// specialized child classes like ChMatrixDynamic, ChMatrixNM, /// ChMatrix33 and so on; all of them have this same base class. /// Warning: for optimization reasons, not all functions will /// check about boundaries of element indexes and matrix sizes (in /// some cases, if sizes are wrong, debug asserts are used). /// /// Further info at the @ref mathematical_objects manual page. template <class Real = double> class ChMatrix { protected: // // DATA // int rows = 1; int columns = 1; Real* address; public: // // CONSTRUCTORS (none - abstract class that must be implemented with child classes) // virtual ~ChMatrix() {} // // OPERATORS OVERLOADING // /// Parenthesis () operator, to access a single element of the matrix, by /// supplying the row and the column (indexes start from 0). /// For example: m(3,5) gets the element at the 4th row, 6th column. /// Value is returned by reference, so it can be modified, like in m(1,2)=10. Real& operator()(const int row, const int col) { assert(row >= 0 && col >= 0 && row < rows && col < columns); return (*(address + col + (row * columns))); } const Real& operator()(const int row, const int col) const { assert(row >= 0 && col >= 0 && row < rows && col < columns); return (*(address + col + (row * columns))); } /// Parenthesis () operator, to access a single element of the matrix, by /// supplying the ordinal of the element (indexes start from 0). /// For example: m(3) gets the 4th element, counting row by row. /// Mostly useful if the matrix is Nx1 sized (i.e. 
a N-element vector). /// Value is returned by reference, so it can be modified, like in m(1,2)=10. Real& operator()(const int el) { assert(el >= 0 && el < rows * columns); return (*(address + el)); } const Real& operator()(const int el) const { assert(el >= 0 && el < rows * columns); return (*(address + el)); } /// The [] operator returns the address of the n-th row. This is mostly /// for compatibility with old matrix programming styles (2d array-like) /// where to access an element at row i, column j, one can write mymatrix[i][j]. Real* operator[](const int row) { assert(row >= 0 && row < rows); return ((address + (row * columns))); } const Real* operator[](const int row) const { assert(row >= 0 && row < rows); return ((address + (row * columns))); } /// Multiplies this matrix by a factor, in place ChMatrix<Real>& operator*=(const Real factor) { MatrScale(factor); return *this; } /// Increments this matrix by another matrix, in place template <class RealB> ChMatrix<Real>& operator+=(const ChMatrix<RealB>& matbis) { MatrInc(matbis); return *this; } /// Decrements this matrix by another matrix, in place template <class RealB> ChMatrix<Real>& operator-=(const ChMatrix<RealB>& matbis) { MatrDec(matbis); return *this; } /// Matrices are equal? bool operator==(const ChMatrix<Real>& other) { return Equals(other); } /// Matrices are not equal? bool operator!=(const ChMatrix<Real>& other) { return !Equals(other); } /// Assignment operator virtual ChMatrix<Real>& operator=(const ChMatrix<Real>& matbis) { if (&matbis != this) CopyFromMatrix(matbis); return *this; } template <class RealB> ChMatrix<Real>& operator=(const ChMatrix<RealB>& matbis) { CopyFromMatrix(matbis); return *this; } // // FUNCTIONS // /// Sets the element at row,col position. Indexes start with zero. void SetElement(int row, int col, Real elem) { assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks *(address + col + (row * columns)) = elem; } /// Gets the element at row,col position. Indexes start with zero. /// The return value is a copy of original value. Use Element() instead if you /// want to access directly by reference the original element. Real GetElement(int row, int col) { assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks return (*(address + col + (row * columns))); } Real GetElement(int row, int col) const { assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks return (*(address + col + (row * columns))); } /// Sets the Nth element, counting row after row. void SetElementN(int index, Real elem) { assert(index >= 0 && index < (rows * columns)); // boundary checks *(address + index) = elem; } /// Gets the Nth element, counting row after row. Real GetElementN(int index) { assert(index >= 0 && index < (rows * columns)); return (*(address + index)); } const Real GetElementN(int index) const { assert(index >= 0 && index < (rows * columns)); return (*(address + index)); } /// Access a single element of the matrix, by /// supplying the row and the column (indexes start from 0). /// Value is returned by reference, so it can be modified, like in m.Element(1,2)=10. Real& Element(int row, int col) { assert(row >= 0 && col >= 0 && row < rows && col < columns); return (*(address + col + (row * columns))); } const Real& Element(int row, int col) const { assert(row >= 0 && col >= 0 && row < rows && col < columns); return (*(address + col + (row * columns))); } /// Access a single element of the matrix, the Nth element, counting row after row. 
/// Value is returned by reference, so it can be modified, like in m.Element(5)=10. Real& ElementN(int index) { assert(index >= 0 && index < (rows * columns)); return (*(address + index)); } const Real& ElementN(int index) const { assert(index >= 0 && index < (rows * columns)); return (*(address + index)); } /// Access directly the "Real* address" buffer. Warning! this is a low level /// function, it should be used in rare cases, if really needed! Real* GetAddress() { return address; } const Real* GetAddress() const { return address; } /// Gets the number of rows int GetRows() const { return rows; } /// Gets the number of columns int GetColumns() const { return columns; } /// Reallocate memory for a new size. VIRTUAL! Must be implemented by child classes! virtual void Resize(int nrows, int ncols) {} /// Swaps the columns a and b void SwapColumns(int a, int b) { Real temp; for (int i = 0; i < rows; i++) { temp = GetElement(i, a); SetElement(i, a, GetElement(i, b)); SetElement(i, b, temp); } } /// Swap the rows a and b void SwapRows(int a, int b) { Real temp; for (int i = 0; i < columns; i++) { temp = GetElement(a, i); SetElement(a, i, GetElement(b, i)); SetElement(b, i, temp); } } /// Fill the diagonal elements, given a sample. /// Note that the matrix must already be square (no check for /// rectangular matrices!), and the extra-diagonal elements are /// not modified -this function does not set them to 0- void FillDiag(Real sample) { for (int i = 0; i < rows; ++i) SetElement(i, i, sample); } /// Fill the matrix with the same value in all elements void FillElem(Real sample) { for (int i = 0; i < rows * columns; ++i) SetElementN(i, sample); } /// Fill the matrix with random float numbers, falling within the /// "max"/"min" range. void FillRandom(Real max, Real min) { for (int i = 0; i < rows * columns; ++i) SetElementN(i, min + (Real)ChRandom() * (max - min)); } /// Resets the matrix to zero (warning: simply sets memory to 0 bytes!) virtual void Reset() { // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns); for (int i = 0; i < rows * columns; ++i) this->address[i] = 0; } /// Reset to zeroes and (if needed) changes the size to have row and col void Reset(int nrows, int ncols) { Resize(nrows, ncols); // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns); for (int i = 0; i < rows * columns; ++i) this->address[i] = 0; } /// Reset to identity matrix (ones on diagonal, zero elsewhere) void SetIdentity() { Reset(); FillDiag(1.0); } /// Copy a matrix "matra" into this matrix. Note that /// the destination matrix will be resized if necessary. template <class RealB> void CopyFromMatrix(const ChMatrix<RealB>& matra) { Resize(matra.GetRows(), matra.GetColumns()); // ElementsCopy(address, matra.GetAddress(), rows*columns); // memcpy (address, matra.address, (sizeof(Real) * rows * columns)); for (int i = 0; i < rows * columns; ++i) address[i] = (Real)matra.GetAddress()[i]; } /// Copy the transpose of matrix "matra" into this matrix. Note that /// the destination matrix will be resized if necessary. template <class RealB> void CopyFromMatrixT(const ChMatrix<RealB>& matra) { Resize(matra.GetColumns(), matra.GetRows()); for (int i = 0; i < matra.GetRows(); ++i) for (int j = 0; j < matra.GetColumns(); ++j) SetElement(j, i, (Real)matra.Element(i, j)); } /// Copy the transposed upper triangular part of "matra" in the lower triangular /// part of this matrix. (matra must be square) /// Note that the destination matrix will be resized if necessary. 
template <class RealB> // _______ // void CopyTUpMatrix(const ChMatrix<RealB>& matra) // \ | |\ // { // \ A'| ---> | \ // Resize(matra.GetRows(), matra.GetColumns()); // \ | |this\ // for (int i = 0; i < matra.GetRows(); i++) { // \| |______\ // for (int j = 0; j < matra.GetRows(); j++) SetElement(j, i, (Real)matra.GetElement(i, j)); } } /// Copy the transposed lower triangular part of "matra" in the upper triangular /// part of this matrix. (matra must be square) /// Note that the destination matrix will be resized if necessary. template <class RealB> // _______ // void CopyTLwMatrix(const ChMatrix<RealB>& matra) // |\ \ | // { // | \ ---> \this| // Resize(matra.GetRows(), matra.GetColumns()); // |A' \ \ | // for (int i = 0; i < matra.GetRows(); i++) { // |______\ \| // for (int j = 0; j < matra.GetRows(); j++) SetElement(i, j, (Real)matra.GetElement(j, i)); } } // // STREAMING // /// Method to allow serialization of transient data in archives. virtual void ArchiveOUT(ChArchiveOut& marchive) { // suggested: use versioning marchive.VersionWrite(1); // stream out all member data if (ChArchiveAsciiDump* mascii = dynamic_cast<ChArchiveAsciiDump*>(&marchive)) { // CUSTOM row x col 'intuitive' table-like log when using ChArchiveAsciiDump: mascii->indent(); mascii->GetStream()->operator<<(rows); mascii->GetStream()->operator<<(" rows, "); mascii->GetStream()->operator<<(columns); mascii->GetStream()->operator<<(" columns:\n"); for (int i = 0; i < rows; i++) { mascii->indent(); for (int j = 0; j < columns; j++) { (*mascii->GetStream()) << Element(i, j); mascii->GetStream()->operator<<(", "); } mascii->GetStream()->operator<<("\n"); } } else { marchive << make_ChNameValue("rows", rows); marchive << make_ChNameValue("columns", columns); // NORMAL array-based serialization: int tot_elements = GetRows() * GetColumns(); ChValueSpecific< Real* > specVal(this->address, "data", 0); marchive.out_array_pre(specVal, tot_elements); for (int i = 0; i < tot_elements; i++) { marchive << CHNVP(ElementN(i), ""); marchive.out_array_between(specVal, tot_elements); } marchive.out_array_end(specVal, tot_elements); } } /// Method to allow deserialization of transient data from archives. virtual void ArchiveIN(ChArchiveIn& marchive) { // suggested: use versioning int version = marchive.VersionRead(); // stream in all member data int m_row, m_col; marchive >> make_ChNameValue("rows", m_row); marchive >> make_ChNameValue("columns", m_col); Reset(m_row, m_col); // custom input of matrix data as array size_t tot_elements = GetRows() * GetColumns(); marchive.in_array_pre("data", tot_elements); for (int i = 0; i < tot_elements; i++) { marchive >> CHNVP(ElementN(i)); marchive.in_array_between("data"); } marchive.in_array_end("data"); } /// Method to allow serializing transient data in ascii /// as a readable item, for example "chrono::GetLog() << myobject;" /// ***OBSOLETE*** void StreamOUT(ChStreamOutAscii& mstream) { mstream << "\n" << "Matrix " << GetRows() << " rows, " << GetColumns() << " columns." << "\n"; for (int i = 0; i < ChMin(GetRows(), 8); i++) { for (int j = 0; j < ChMin(GetColumns(), 8); j++) mstream << GetElement(i, j) << " "; if (GetColumns() > 8) mstream << "..."; mstream << "\n"; } if (GetRows() > 8) mstream << "... \n\n"; } /// Method to allow serializing transient data into an ascii stream (ex.
a file) /// as a Matlab .dat file (all numbers in a row, separated by space, then CR) void StreamOUTdenseMatlabFormat(ChStreamOutAscii& mstream) { for (int ii = 0; ii < this->GetRows(); ii++) { for (int jj = 0; jj < this->GetColumns(); jj++) { mstream << this->GetElement(ii, jj); if (jj < (this->GetColumns() - 1)) mstream << " "; } mstream << "\n"; } } // // MATH MEMBER FUNCTIONS. // For speed reasons, sometimes size checking of operands is left to the user! // /// Changes the sign of all the elements of this matrix, in place. void MatrNeg() { for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) = -ElementN(nel); } /// Sum two matrices, and stores the result in "this" matrix: [this]=[A]+[B]. template <class RealB, class RealC> void MatrAdd(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) { assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows()); assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows()); for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) = (Real)(matra.ElementN(nel) + matrb.ElementN(nel)); } /// Subtract two matrices, and stores the result in "this" matrix: [this]=[A]-[B]. template <class RealB, class RealC> void MatrSub(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) { assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows()); assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows()); for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) = (Real)(matra.ElementN(nel) - matrb.ElementN(nel)); } /// Increments this matrix with another matrix A, as: [this]+=[A] template <class RealB> void MatrInc(const ChMatrix<RealB>& matra) { assert(matra.GetColumns() == columns && matra.GetRows() == rows); for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) += (Real)matra.ElementN(nel); } /// Increments this matrix by \p val, as [this]+=val void MatrInc(Real val) { for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) += val; } /// Decrements this matrix with another matrix A, as: [this]-=[A] template <class RealB> void MatrDec(const ChMatrix<RealB>& matra) { assert(matra.GetColumns() == columns && matra.GetRows() == rows); for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) -= (Real)matra.ElementN(nel); } /// Scales a matrix, multiplying all elements by a constant value: [this]*=f void MatrScale(Real factor) { for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) *= factor; } /// Scales a matrix, multiplying all element by all other elements of /// matra (it is not the classical matrix multiplication!) template <class RealB> void MatrScale(const ChMatrix<RealB>& matra) { assert(matra.GetColumns() == columns && matra.GetRows() == rows); for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) *= (Real)matra.ElementN(nel); } /// Scales a matrix, dividing all elements by a constant value: [this]/=f void MatrDivScale(Real factor) { for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) /= factor; } /// Scales a matrix, dividing all element by all other elements of /// matra (it is not the classical matrix multiplication!) template <class RealB> void MatrDivScale(const ChMatrix<RealB>& matra) { assert(matra.GetColumns() == columns && matra.GetRows() == rows); for (int nel = 0; nel < rows * columns; ++nel) ElementN(nel) /= (Real)matra.ElementN(nel); } /// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B]. 
template <class RealB, class RealC> void MatrMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) { assert(matra.GetColumns() == matrb.GetRows()); assert(this->rows == matra.GetRows()); assert(this->columns == matrb.GetColumns()); int col, row, colres; Real sum; for (colres = 0; colres < matrb.GetColumns(); ++colres) { for (row = 0; row < matra.GetRows(); ++row) { sum = 0; for (col = 0; col < matra.GetColumns(); ++col) sum += (Real)(matra.Element(row, col) * matrb.Element(col, colres)); SetElement(row, colres, sum); } } } #ifdef CHRONO_HAS_AVX /// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B]. /// AVX implementation: The speed up is marginal if size of the matrices are small, e.g. 3*3 /// Generally, as the matra.GetColumns() increases the method performs better void MatrMultiplyAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) { assert(matra.GetColumns() == matrb.GetRows()); assert(this->rows == matra.GetRows()); assert(this->columns == matrb.GetColumns()); int A_Nrow = matra.GetRows(); int B_Nrow = matrb.GetRows(); int A_NCol = matra.GetColumns(); int B_NCol = matrb.GetColumns(); const double* A_add = matra.GetAddress(); const double* B_add = matrb.GetAddress(); double* this_Add = this->GetAddress(); for (int rowA = 0; rowA < A_Nrow; rowA++) { for (int colB = 0; colB < B_NCol; colB += 4) { __m256d sum = _mm256_setzero_pd(); for (int elem = 0; elem < A_NCol; elem++) { __m256d ymmA = _mm256_broadcast_sd(A_add + A_NCol * rowA + elem); __m256d ymmB = _mm256_loadu_pd(B_add + elem * B_NCol + colB); __m256d prod = _mm256_mul_pd(ymmA, ymmB); sum = _mm256_add_pd(sum, prod); } _mm256_storeu_pd(this_Add + rowA * B_NCol + colB, sum); } } } /// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]' /// Note: This method is faster than MatrMultiplyT if matra.GetColumns()%4=0 && matra.GetColumns()>8 /// It is still fast if matra.GetColumns() is large enough even if matra.GetColumns()%4!=0 void MatrMultiplyTAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) { assert(matra.GetColumns() == matrb.GetColumns()); assert(this->GetRows() == matra.GetRows()); assert(this->GetColumns() == matrb.GetRows()); int A_Nrow = matra.GetRows(); int B_Nrow = matrb.GetRows(); int A_NCol = matra.GetColumns(); int B_NCol = matrb.GetColumns(); const double* A_add = matra.GetAddress(); const double* B_add = matrb.GetAddress(); bool NeedsPadding = (B_NCol % 4 != 0); int CorrectFAT = ((B_NCol >> 2) << 2); for (int rowA = 0; rowA < A_Nrow; rowA++) { for (int rowB = 0; rowB < B_Nrow; rowB++) { int colB; double temp_sum = 0.0; __m256d sum = _mm256_setzero_pd(); for (colB = 0; colB < CorrectFAT; colB += 4) { __m256d ymmA = _mm256_loadu_pd(A_add + rowA * A_NCol + colB); __m256d ymmB = _mm256_loadu_pd(B_add + rowB * B_NCol + colB); __m256d prod = _mm256_mul_pd(ymmA, ymmB); sum = _mm256_add_pd(sum, prod); } sum = _mm256_hadd_pd(sum, sum); temp_sum = ((double*)&sum)[0] + ((double*)&sum)[2]; if (NeedsPadding) for (colB = CorrectFAT; colB < B_NCol; colB++) { temp_sum += (matra.Element(rowA, colB) * matrb.Element(rowB, colB)); } SetElement(rowA, rowB, temp_sum); } } } #endif #ifdef CHRONO_HAS_NEON /// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B]. /// NEON implementation: The speed up is marginal if size of the matrices are small. 
/// Much like AVX, as the matra.GetColumns() increases the method performs better. /// Note: the inner loop advances two columns at a time, so B_NCol is assumed even. void MatrMultiplyNEON(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) { assert(matra.GetColumns() == matrb.GetRows()); assert(this->rows == matra.GetRows()); assert(this->columns == matrb.GetColumns()); int A_Nrow = matra.GetRows(); int B_Nrow = matrb.GetRows(); int A_NCol = matra.GetColumns(); int B_NCol = matrb.GetColumns(); const double* A_add = matra.GetAddress(); const double* B_add = matrb.GetAddress(); double* this_Add = this->GetAddress(); for (int rowA = 0; rowA < A_Nrow; rowA++) { for (int colB = 0; colB < B_NCol; colB += 2) { // vdupq_n_f64 broadcasts a scalar into both lanes: zero the accumulator float64x2_t sum = vdupq_n_f64(0.0); for (int elem = 0; elem < A_NCol; elem++) { float64x2_t V_2DA = vld1q_dup_f64(A_add + A_NCol * rowA + elem); float64x2_t V_2DB = vld1q_f64(B_add + elem * B_NCol + colB); sum = vfmaq_f64(sum, V_2DA, V_2DB); } vst1q_f64(this_Add + rowA * B_NCol + colB, sum); } } } #endif /// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]' /// Faster than doing B.MatrTranspose(); result.MatrMultiply(A,B); /// Note: no check on mistaken size of this! template <class RealB, class RealC> void MatrMultiplyT(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) { assert(matra.GetColumns() == matrb.GetColumns()); assert(this->rows == matra.GetRows()); assert(this->columns == matrb.GetRows()); int col, row, colres; Real sum; for (colres = 0; colres < matrb.GetRows(); ++colres) { for (row = 0; row < matra.GetRows(); ++row) { sum = 0; for (col = 0; col < matra.GetColumns(); ++col) sum += (Real)(matra.Element(row, col) * matrb.Element(colres, col)); SetElement(row, colres, sum); } } } /// Multiplies two matrices (the first is considered transposed): [this]=[A]'*[B] /// Faster than doing A.MatrTranspose(); result.MatrMultiply(A,B); template <class RealB, class RealC> void MatrTMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) { assert(matra.GetRows() == matrb.GetRows()); assert(this->rows == matra.GetColumns()); assert(this->columns == matrb.GetColumns()); int col, row, colres; Real sum; for (colres = 0; colres < matrb.GetColumns(); ++colres) { for (row = 0; row < matra.GetColumns(); ++row) { sum = 0; for (col = 0; col < (matra.GetRows()); ++col) sum += (Real)(matra.Element(col, row) * matrb.Element(col, colres)); SetElement(row, colres, sum); } } } /// Computes dot product between two column-matrices (vectors) with /// same size. Returns a scalar value. template <class RealB, class RealC> static Real MatrDot(const ChMatrix<RealB>& ma, const ChMatrix<RealC>& mb) { assert(ma.GetColumns() == mb.GetColumns() && ma.GetRows() == mb.GetRows()); Real tot = 0; for (int i = 0; i < ma.GetRows(); ++i) tot += (Real)(ma.ElementN(i) * mb.ElementN(i)); return tot; } /// Transpose this matrix in place void MatrTranspose() { if (columns == rows) // Square transp. is optimized { for (int row = 0; row < rows; ++row) for (int col = row; col < columns; ++col) if (row != col) { Real temp = Element(row, col); Element(row, col) = Element(col, row); Element(col, row) = temp; } int tmpr = rows; rows = columns; columns = tmpr; } else // Naive implementation for rectangular case. Not in-place. Slower.
        {
            ChMatrixDynamic<Real> matrcopy(*this);
            int tmpr = rows;
            rows = columns;
            columns = tmpr;  // don't realloc buffer, anyway
            for (int row = 0; row < rows; ++row)
                for (int col = 0; col < columns; ++col)
                    Element(row, col) = matrcopy.Element(col, row);
        }
    }

    /// Returns the determinant of the matrix.
    /// Note! This method must be used only with max 4x4 matrices,
    /// otherwise it throws an exception.
    Real Det() {
        assert(this->GetRows() == this->GetColumns());
        assert(this->GetRows() <= 4);
        if (this->GetRows() != this->GetColumns())
            throw("Cannot compute matrix determinant because the matrix is rectangular");
        if (this->GetRows() > 4)
            throw("Cannot compute matrix determinant because the matrix is larger than 4x4");
        Real det = 0;
        switch (this->GetRows()) {
            case 1:
                det = (*this)(0, 0);
                break;
            case 2:
                det = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
                break;
            case 3:
                det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) +
                      (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(2, 0) * (*this)(1, 1) * (*this)(0, 2) -
                      (*this)(2, 1) * (*this)(1, 2) * (*this)(0, 0) - (*this)(2, 2) * (*this)(1, 0) * (*this)(0, 1);
                break;
            case 4:
                det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3) +
                      (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) +
                      (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) +
                      (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                      (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) +
                      (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) +
                      (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3) +
                      (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) +
                      (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) +
                      (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                      (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) +
                      (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) -
                      (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                      (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) -
                      (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) -
                      (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3) -
                      (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                      (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) -
                      (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                      (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) -
                      (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) -
                      (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2) -
                      (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                      (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1);
                break;
        }
        return det;
    }

    /// Returns the inverse of the matrix.
    /// Note! This method must be used only with max 4x4 matrices,
    /// otherwise it throws an exception.
    void MatrInverse() {
        assert(this->GetRows() == this->GetColumns());
        assert(this->GetRows() <= 4);
        assert(this->Det() != 0);
        if (this->GetRows() != this->GetColumns())
            throw("Cannot compute matrix inverse because the matrix is rectangular");
        if (this->GetRows() > 4)
            throw("Cannot compute matrix inverse because the matrix is larger than 4x4");
        if (this->Det() == 0)
            throw("Cannot compute matrix inverse because the matrix is singular");

        switch (this->GetRows()) {
            case 1:
                (*this)(0, 0) = (1 / (*this)(0, 0));
                break;
            case 2: {
                ChMatrixDynamic<Real> inv(2, 2);
                inv(0, 0) = (*this)(1, 1);
                inv(0, 1) = -(*this)(0, 1);
                inv(1, 1) = (*this)(0, 0);
                inv(1, 0) = -(*this)(1, 0);
                inv.MatrDivScale(this->Det());
                this->CopyFromMatrix(inv);
                break;
            }
            case 3: {
                ChMatrixDynamic<Real> inv(3, 3);
                inv(0, 0) = (*this)(1, 1) * (*this)(2, 2) - (*this)(1, 2) * (*this)(2, 1);
                inv(0, 1) = (*this)(2, 1) * (*this)(0, 2) - (*this)(0, 1) * (*this)(2, 2);
                inv(0, 2) = (*this)(0, 1) * (*this)(1, 2) - (*this)(0, 2) * (*this)(1, 1);
                inv(1, 0) = (*this)(1, 2) * (*this)(2, 0) - (*this)(1, 0) * (*this)(2, 2);
                inv(1, 1) = (*this)(2, 2) * (*this)(0, 0) - (*this)(2, 0) * (*this)(0, 2);
                inv(1, 2) = (*this)(0, 2) * (*this)(1, 0) - (*this)(1, 2) * (*this)(0, 0);
                inv(2, 0) = (*this)(1, 0) * (*this)(2, 1) - (*this)(1, 1) * (*this)(2, 0);
                inv(2, 1) = (*this)(0, 1) * (*this)(2, 0) - (*this)(0, 0) * (*this)(2, 1);
                inv(2, 2) = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
                inv.MatrDivScale(this->Det());
                this->CopyFromMatrix(inv);
                break;
            }
            case 4: {
                ChMatrixDynamic<Real> inv(4, 4);
                inv.SetElement(0, 0, (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) - (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) +
                                     (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) - (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                                     (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) + (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(0, 1, (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 1) - (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 1) -
                                     (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 2) + (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 2) +
                                     (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 3) - (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(0, 2, (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 1) - (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 1) +
                                     (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 2) - (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 2) -
                                     (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 3) + (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 3));
                inv.SetElement(0, 3, (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) - (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) -
                                     (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) +
                                     (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) - (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3));
                inv.SetElement(1, 0, (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) - (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                                     (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) + (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                                     (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) - (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(1, 1, (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 0) +
                                     (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 2) -
                                     (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(1, 2, (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 0) -
                                     (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 2) +
                                     (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 3));
                inv.SetElement(1, 3, (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) - (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) +
                                     (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) - (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) -
                                     (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) + (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3));
                inv.SetElement(2, 0, (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) - (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) +
                                     (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) - (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                                     (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) + (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3));
                inv.SetElement(2, 1, (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 0) -
                                     (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 1) +
                                     (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 3));
                inv.SetElement(2, 2, (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 0) +
                                     (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 1) -
                                     (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 3));
                inv.SetElement(2, 3, (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) - (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) -
                                     (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) + (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) +
                                     (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) - (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3));
                inv.SetElement(3, 0, (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) - (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                                     (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1) + (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                                     (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) - (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2));
                inv.SetElement(3, 1, (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 0) +
                                     (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 1) -
                                     (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 2));
                inv.SetElement(3, 2, (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 0) -
                                     (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 1) +
                                     (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 2));
                inv.SetElement(3, 3, (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) - (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) +
                                     (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) -
                                     (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) + (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2));
                inv.MatrDivScale(this->Det());
                this->CopyFromMatrix(inv);
                break;
            }
        }
    }

    /// Returns true if this matrix is identical to the other matrix.
    bool Equals(const ChMatrix<Real>& other) const { return Equals(other, 0.0); }

    /// Returns true if this matrix equals the other matrix, within a tolerance 'tol'.
    bool Equals(const ChMatrix<Real>& other, Real tol) const {
        if ((other.GetColumns() != this->columns) || (other.GetRows() != this->rows))
            return false;
        for (int nel = 0; nel < rows * columns; ++nel)
            if (fabs(ElementN(nel) - other.ElementN(nel)) > tol)
                return false;
        return true;
    }

    /// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
    /// The matrix must be 3x4.
    /// \return The result of the multiplication, i.e. a vector.
    template <class RealB>
    ChVector<Real> Matr34_x_Quat(const ChQuaternion<RealB>& qua) {
        assert((rows == 3) && (columns == 4));
        return ChVector<Real>(Get34Element(0, 0) * (Real)qua.e0() + Get34Element(0, 1) * (Real)qua.e1() +
                                  Get34Element(0, 2) * (Real)qua.e2() + Get34Element(0, 3) * (Real)qua.e3(),
                              Get34Element(1, 0) * (Real)qua.e0() + Get34Element(1, 1) * (Real)qua.e1() +
                                  Get34Element(1, 2) * (Real)qua.e2() + Get34Element(1, 3) * (Real)qua.e3(),
                              Get34Element(2, 0) * (Real)qua.e0() + Get34Element(2, 1) * (Real)qua.e1() +
                                  Get34Element(2, 2) * (Real)qua.e2() + Get34Element(2, 3) * (Real)qua.e3());
    }

    /// Multiplies this 3x4 matrix (transposed) by a vector, as q=[G]'*v
    /// The matrix must be 3x4.
    /// \return The result of the multiplication, i.e. a quaternion.
    template <class RealB>
    ChQuaternion<Real> Matr34T_x_Vect(const ChVector<RealB>& va) {
        assert((rows == 3) && (columns == 4));
        return ChQuaternion<Real>(
            Get34Element(0, 0) * (Real)va.x() + Get34Element(1, 0) * (Real)va.y() + Get34Element(2, 0) * (Real)va.z(),
            Get34Element(0, 1) * (Real)va.x() + Get34Element(1, 1) * (Real)va.y() + Get34Element(2, 1) * (Real)va.z(),
            Get34Element(0, 2) * (Real)va.x() + Get34Element(1, 2) * (Real)va.y() + Get34Element(2, 2) * (Real)va.z(),
            Get34Element(0, 3) * (Real)va.x() + Get34Element(1, 3) * (Real)va.y() + Get34Element(2, 3) * (Real)va.z());
    }

    /// Multiplies this 4x4 matrix by a quaternion.
    /// The matrix must be 4x4.
    /// \return The result of the multiplication, i.e. a quaternion.
    template <class RealB>
    ChQuaternion<Real> Matr44_x_Quat(const ChQuaternion<RealB>& qua) {
        assert((rows == 4) && (columns == 4));
        return ChQuaternion<Real>(Get44Element(0, 0) * (Real)qua.e0() + Get44Element(0, 1) * (Real)qua.e1() +
                                      Get44Element(0, 2) * (Real)qua.e2() + Get44Element(0, 3) * (Real)qua.e3(),
                                  Get44Element(1, 0) * (Real)qua.e0() + Get44Element(1, 1) * (Real)qua.e1() +
                                      Get44Element(1, 2) * (Real)qua.e2() + Get44Element(1, 3) * (Real)qua.e3(),
                                  Get44Element(2, 0) * (Real)qua.e0() + Get44Element(2, 1) * (Real)qua.e1() +
                                      Get44Element(2, 2) * (Real)qua.e2() + Get44Element(2, 3) * (Real)qua.e3(),
                                  Get44Element(3, 0) * (Real)qua.e0() + Get44Element(3, 1) * (Real)qua.e1() +
                                      Get44Element(3, 2) * (Real)qua.e2() + Get44Element(3, 3) * (Real)qua.e3());
    }

    /// Transposes only the lower-right 3x3 submatrix of a hemisymmetric 4x4 matrix,
    /// used when the 4x4 matrix is a "star" matrix [q] coming from a quaternion q:
    /// the non-commutative quaternion product is
    ///    q1 x q2 = [q1]*q2 = [q2st]*q1
    /// where [q2st] is the "semi-transpose" of [q2].
    void MatrXq_SemiTranspose() {
        SetElement(1, 2, -GetElement(1, 2));
        SetElement(1, 3, -GetElement(1, 3));
        SetElement(2, 1, -GetElement(2, 1));
        SetElement(2, 3, -GetElement(2, 3));
        SetElement(3, 1, -GetElement(3, 1));
        SetElement(3, 2, -GetElement(3, 2));
    }

    /// Change the sign of the 2nd, 3rd and 4th columns of a 4x4 matrix.
    /// The product between a quaternion q1 and the conjugate of q2 (q2') is
    ///    q1 x q2' = [q1]*q2' = [q1sn]*q2
    /// where [q1sn] is the semi-negation of the 4x4 matrix [q1].
    void MatrXq_SemiNeg() {
        for (int i = 0; i < rows; ++i)
            for (int j = 1; j < columns; ++j)
                SetElement(i, j, -GetElement(i, j));
    }

    /// Gets the infinity norm of the matrix, i.e. the maximum
    /// of its elements in absolute value.
    Real NormInf() const {
        Real norm = 0;
        for (int nel = 0; nel < rows * columns; ++nel)
            if ((fabs(ElementN(nel))) > norm)
                norm = fabs(ElementN(nel));
        return norm;
    }

    /// Gets the two-norm of the matrix, i.e. the square root
    /// of the sum of the elements squared.
    Real NormTwo() const {
        Real norm = 0;
        for (int nel = 0; nel < rows * columns; ++nel)
            norm += ElementN(nel) * ElementN(nel);
        return (sqrt(norm));
    }

    /// Finds the max value among the values of the matrix.
    Real Max() const {
        Real mmax = GetElement(0, 0);
        for (int nel = 0; nel < rows * columns; ++nel)
            if (ElementN(nel) > mmax)
                mmax = ElementN(nel);
        return mmax;
    }

    /// Finds the min value among the values of the matrix.
    Real Min() const {
        Real mmin = GetElement(0, 0);
        for (int nel = 0; nel < rows * columns; ++nel)
            if (ElementN(nel) < mmin)
                mmin = ElementN(nel);
        return mmin;
    }

    /// Linear interpolation of two matrices. Parameter mx must be 0...1.
    /// [this] = (1-mx)[A] + (mx)[B]   Matrices must have the same size!!
    void LinInterpolate(const ChMatrix<Real>& matra, const ChMatrix<Real>& matrb, Real mx) {
        assert(matra.columns == matrb.columns && matra.rows == matrb.rows);
        for (int nel = 0; nel < rows * columns; nel++)
            ElementN(nel) = matra.ElementN(nel) * (1 - mx) + matrb.ElementN(nel) * (mx);
    }

    /// Fills a matrix or a vector with a bilinear interpolation,
    /// from corner values (as a u-v patch).
    void RowColInterp(Real vmin, Real vmax, Real umin, Real umax) {
        for (int iu = 0; iu < GetColumns(); iu++)
            for (int iv = 0; iv < GetRows(); iv++) {
                if (GetRows() > 1)
                    Element(iv, iu) = vmin + (vmax - vmin) * ((Real)iv / ((Real)(GetRows() - 1)));
                if (GetColumns() > 1)
                    Element(iv, iu) += umin + (umax - umin) * ((Real)iu / ((Real)(GetColumns() - 1)));
            }
    }

    //
    // BOOKKEEPING
    //

    /// Paste a matrix "matra" into "this", inserting at location insrow-inscol.
    /// Normal copy for insrow=inscol=0
    template <class RealB>
    void PasteMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
        for (int i = 0; i < matra.GetRows(); ++i)
            for (int j = 0; j < matra.GetColumns(); ++j)
                Element(i + insrow, j + inscol) = (Real)matra.Element(i, j);
    }

    /// Paste a matrix "matra" into "this", inserting at location insrow-inscol
    /// and performing a sum with the preexisting values.
    template <class RealB>
    void PasteSumMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
        for (int i = 0; i < matra.GetRows(); ++i)
            for (int j = 0; j < matra.GetColumns(); ++j)
                Element(i + insrow, j + inscol) += (Real)matra.Element(i, j);
    }

    /// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol.
    /// Normal copy for insrow=inscol=0
    template <class RealB>
    void PasteTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
        for (int i = 0; i < matra.GetRows(); ++i)
            for (int j = 0; j < matra.GetColumns(); ++j)
                Element(j + insrow, i + inscol) = (Real)matra.Element(i, j);
    }

    /// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol
    /// and performing a sum with the preexisting values.
    template <class RealB>
    void PasteSumTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
        for (int i = 0; i < matra.GetRows(); ++i)
            for (int j = 0; j < matra.GetColumns(); ++j)
                Element(j + insrow, i + inscol) += (Real)matra.Element(i, j);
    }

    /// Paste a clipped portion of the matrix "matra" into "this",
    /// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
    template <class RealB>
    void PasteClippedMatrix(const ChMatrix<RealB>& matra,
                            int cliprow,
                            int clipcol,
                            int nrows,
                            int ncolumns,
                            int insrow,
                            int inscol) {
        for (int i = 0; i < nrows; ++i)
            for (int j = 0; j < ncolumns; ++j)
                Element(i + insrow, j + inscol) = (Real)matra.Element(i + cliprow, j + clipcol);
    }

    /// Paste a clipped portion of the matrix "matra" into "this", where "this"
    /// is a vector (of ChMatrix type),
    /// inserting the clip (of size nrows, ncolumns) at the location insindex.
    template <class RealB>
    void PasteClippedMatrixToVector(const ChMatrix<RealB>& matra,
                                    int cliprow,
                                    int clipcol,
                                    int nrows,
                                    int ncolumns,
                                    int insindex) {
        for (int i = 0; i < nrows; ++i)
            for (int j = 0; j < ncolumns; ++j)
                ElementN(insindex + i * ncolumns + j) = (Real)matra.Element(cliprow + i, clipcol + j);
    }

    /// Paste a clipped portion of the vector "matra" into "this", where "this"
    /// is a matrix (of ChMatrix type): nrows*ncolumns elements are read starting
    /// at position insindex of the vector and written at location cliprow-clipcol.
    template <class RealB>
    void PasteClippedVectorToMatrix(const ChMatrix<RealB>& matra,
                                    int cliprow,
                                    int clipcol,
                                    int nrows,
                                    int ncolumns,
                                    int insindex) {
        for (int i = 0; i < nrows; ++i)
            for (int j = 0; j < ncolumns; ++j)
                Element(i + cliprow, j + clipcol) = (Real)matra.ElementN(insindex + i * ncolumns + j);
    }

    /// Paste a clipped portion of the matrix "matra" into "this", performing a sum with preexisting values,
    /// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
    template <class RealB>
    void PasteSumClippedMatrix(const ChMatrix<RealB>& matra,
                               int cliprow,
                               int clipcol,
                               int nrows,
                               int ncolumns,
                               int insrow,
                               int inscol) {
        for (int i = 0; i < nrows; ++i)
            for (int j = 0; j < ncolumns; ++j)
                // the orphaned 'atomic' makes the accumulation safe when this
                // method is called from inside an OpenMP parallel region
                #pragma omp atomic
                Element(i + insrow, j + inscol) += (Real)matra.Element(i + cliprow, j + clipcol);
    }

    /// Paste a vector "va" into the matrix.
    template <class RealB>
    void PasteVector(const ChVector<RealB>& va, int insrow, int inscol) {
        SetElement(insrow + 0, inscol, (Real)va.x());
        SetElement(insrow + 1, inscol, (Real)va.y());
        SetElement(insrow + 2, inscol, (Real)va.z());
    }

    /// Paste a vector "va" into the matrix, summing it with preexisting values.
    template <class RealB>
    void PasteSumVector(const ChVector<RealB>& va, int insrow, int inscol) {
        Element(insrow + 0, inscol) += (Real)va.x();
        Element(insrow + 1, inscol) += (Real)va.y();
        Element(insrow + 2, inscol) += (Real)va.z();
    }

    /// Paste a vector "va" into the matrix, subtracting it from preexisting values.
    template <class RealB>
    void PasteSubVector(const ChVector<RealB>& va, int insrow, int inscol) {
        Element(insrow + 0, inscol) -= (Real)va.x();
        Element(insrow + 1, inscol) -= (Real)va.y();
        Element(insrow + 2, inscol) -= (Real)va.z();
    }

    /// Paste a quaternion into the matrix.
    template <class RealB>
    void PasteQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
        SetElement(insrow + 0, inscol, (Real)qa.e0());
        SetElement(insrow + 1, inscol, (Real)qa.e1());
        SetElement(insrow + 2, inscol, (Real)qa.e2());
        SetElement(insrow + 3, inscol, (Real)qa.e3());
    }

    /// Paste a quaternion into the matrix, summing it with preexisting values.
    template <class RealB>
    void PasteSumQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
        Element(insrow + 0, inscol) += (Real)qa.e0();
        Element(insrow + 1, inscol) += (Real)qa.e1();
        Element(insrow + 2, inscol) += (Real)qa.e2();
        Element(insrow + 3, inscol) += (Real)qa.e3();
    }

    /// Paste a coordsys into the matrix.
    template <class RealB>
    void PasteCoordsys(const ChCoordsys<RealB>& cs, int insrow, int inscol) {
        PasteVector(cs.pos, insrow, inscol);
        PasteQuaternion(cs.rot, insrow + 3, inscol);
    }

    /// Returns the vector clipped from insrow, inscol.
    ChVector<Real> ClipVector(int insrow, int inscol) const {
        return ChVector<Real>(Element(insrow, inscol), Element(insrow + 1, inscol), Element(insrow + 2, inscol));
    }

    /// Returns the quaternion clipped from insrow, inscol.
    ChQuaternion<Real> ClipQuaternion(int insrow, int inscol) const {
        return ChQuaternion<Real>(Element(insrow, inscol), Element(insrow + 1, inscol),
                                  Element(insrow + 2, inscol), Element(insrow + 3, inscol));
    }

    /// Returns the coordsys clipped from insrow, inscol.
    ChCoordsys<Real> ClipCoordsys(int insrow, int inscol) const {
        return ChCoordsys<Real>(ClipVector(insrow, inscol), ClipQuaternion(insrow + 3, inscol));
    }

    //
    // MULTIBODY SPECIFIC MATH FUNCTIONS
    //

    /// Fills a 4x4 matrix as the "star" matrix, representing the quaternion cross product.
    /// That is, given two quaternions a and b, a x b = [Astar]*b
    template <class RealB>
    void Set_Xq_matrix(const ChQuaternion<RealB>& q) {
        Set44Element(0, 0, (Real)q.e0());
        Set44Element(0, 1, -(Real)q.e1());
        Set44Element(0, 2, -(Real)q.e2());
        Set44Element(0, 3, -(Real)q.e3());
        Set44Element(1, 0, (Real)q.e1());
        Set44Element(1, 1, (Real)q.e0());
        Set44Element(1, 2, -(Real)q.e3());
        Set44Element(1, 3, (Real)q.e2());
        Set44Element(2, 0, (Real)q.e2());
        Set44Element(2, 1, (Real)q.e3());
        Set44Element(2, 2, (Real)q.e0());
        Set44Element(2, 3, -(Real)q.e1());
        Set44Element(3, 0, (Real)q.e3());
        Set44Element(3, 1, -(Real)q.e2());
        Set44Element(3, 2, (Real)q.e1());
        Set44Element(3, 3, (Real)q.e0());
    }
};

}  // end namespace chrono

#endif
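As an aside, the "star" matrix layout written by Set_Xq_matrix can be sanity-checked outside of the class. The following self-contained C sketch (the helper names quat_mul and star_matrix are chosen here for illustration only and are not part of the API above) builds the same 4x4 layout from a quaternion q1 and verifies that [q1]*q2 reproduces the Hamilton product q1 x q2:

#include <stdio.h>

/* Hamilton product of two quaternions stored as {e0, e1, e2, e3},
   with e0 the scalar part. */
static void quat_mul(const double a[4], const double b[4], double out[4]) {
    out[0] = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3];
    out[1] = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2];
    out[2] = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1];
    out[3] = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0];
}

/* 4x4 "star" matrix [q], with the same element layout as Set_Xq_matrix. */
static void star_matrix(const double q[4], double M[4][4]) {
    M[0][0] = q[0]; M[0][1] = -q[1]; M[0][2] = -q[2]; M[0][3] = -q[3];
    M[1][0] = q[1]; M[1][1] =  q[0]; M[1][2] = -q[3]; M[1][3] =  q[2];
    M[2][0] = q[2]; M[2][1] =  q[3]; M[2][2] =  q[0]; M[2][3] = -q[1];
    M[3][0] = q[3]; M[3][1] = -q[2]; M[3][2] =  q[1]; M[3][3] =  q[0];
}

int main(void) {
    double q1[4] = {0.5, 0.5, 0.5, 0.5}, q2[4] = {0.8, 0.0, 0.6, 0.0};
    double direct[4], via_star[4] = {0, 0, 0, 0}, M[4][4];
    quat_mul(q1, q2, direct);   /* q1 x q2 computed directly */
    star_matrix(q1, M);
    for (int i = 0; i < 4; i++) /* q1 x q2 computed as [q1]*q2 */
        for (int j = 0; j < 4; j++)
            via_star[i] += M[i][j] * q2[j];
    for (int i = 0; i < 4; i++) /* the two columns should agree */
        printf("%f  %f\n", direct[i], via_star[i]);
    return 0;
}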
exemplo_critical.c
#include "exemplos.h"

// Critical section example
int main(int argc, char **argv) {
    int i, thread_id;
    int global_nloops, private_nloops;

    global_nloops = 0;

#pragma omp parallel private(private_nloops, thread_id)
    {
        private_nloops = 0;
        thread_id = omp_get_thread_num();

#pragma omp for
        for (i = 0; i < 100000; ++i) {
            ++private_nloops;
        }

        // The braces are required: a bare '#pragma omp critical' guards only
        // the single statement that follows it, which would leave the update
        // of global_nloops (and the second printf) racing between threads.
#pragma omp critical
        {
            printf("Thread %d adding its iterations (%d) to the sum (%d)...\n",
                   thread_id, private_nloops, global_nloops);
            global_nloops += private_nloops;
            printf("...total nloops now equals %d.\n", global_nloops);
        }
    }

    printf("The total number of loop iterations is %d\n", global_nloops);

    return 0;
}
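The same total can be computed without any explicit synchronization by letting OpenMP combine per-thread partial sums. A minimal companion sketch (a hypothetical standalone file, not one of the original examples; the per-thread progress messages are dropped since only the final total is reduced):

#include <stdio.h>
#include <omp.h>

int main(void) {
    int i;
    int global_nloops = 0;

    /* reduction(+:global_nloops) gives each thread a private, zero-initialized
       copy of global_nloops and adds the copies together at the end of the
       loop, so no critical section is needed for the final sum */
    #pragma omp parallel for reduction(+:global_nloops)
    for (i = 0; i < 100000; ++i) {
        ++global_nloops;
    }

    printf("The total number of loop iterations is %d\n", global_nloops);
    return 0;
}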
par_multi_interp.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" /*-------------------------------------------------------------------------- * hypre_ParAMGBuildMultipass * This routine implements Stuben's direct interpolation with multiple passes. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildMultipassHost( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, hypre_ParCSRMatrix **P_ptr ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = NULL; //HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; /*HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = NULL;*/ HYPRE_Int num_cols_offd; hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_diag_j; hypre_CSRMatrix *P_offd; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_offd_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_buf_data = NULL; HYPRE_Int *send_map_start; HYPRE_Int *send_map_elmt; HYPRE_Int *send_procs; HYPRE_Int num_recvs = 0; HYPRE_Int *recv_vec_start; HYPRE_Int *recv_procs; HYPRE_Int *new_recv_vec_start = NULL; HYPRE_Int **Pext_send_map_start = NULL; HYPRE_Int **Pext_recv_vec_start = NULL; HYPRE_Int *Pext_start = NULL; HYPRE_Int *P_ncols = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; HYPRE_Int *P_marker; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *C_array; HYPRE_Int *C_array_offd = NULL; HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */ HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first point of pass j contained in pass_array */ HYPRE_Int *P_diag_start; HYPRE_Int *P_offd_start = NULL; HYPRE_Int **P_diag_pass; HYPRE_Int **P_offd_pass = NULL; HYPRE_Int **Pext_pass = NULL; HYPRE_BigInt 
*big_temp_pass = NULL; HYPRE_BigInt **new_elmts = NULL; /* new neighbors generated in each pass */ HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for each pass */ HYPRE_Int *loc = NULL; /* contains locations for new neighbor connections in int_o_buffer to avoid searching */ HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero cols of off proc neighbors */ HYPRE_BigInt *Pext_send_buffer = NULL; /* used to collect global nonzero col ids in P_diag for send_map_elmts */ HYPRE_Int *map_S_to_new = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_Int *permute = NULL; HYPRE_BigInt *big_permute = NULL; HYPRE_Int cnt; HYPRE_Int cnt_nz; HYPRE_Int total_nz; HYPRE_Int pass; HYPRE_Int num_passes; HYPRE_Int max_num_passes = 10; HYPRE_Int n_fine; HYPRE_Int n_coarse = 0; HYPRE_Int n_coarse_offd = 0; HYPRE_Int n_SF = 0; HYPRE_Int n_SF_offd = 0; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *assigned = NULL; HYPRE_Int *assigned_offd = NULL; HYPRE_Real *Pext_send_data = NULL; HYPRE_Real *Pext_data = NULL; HYPRE_Real sum_C, sum_N; HYPRE_Real sum_C_pos, sum_C_neg; HYPRE_Real sum_N_pos, sum_N_neg; HYPRE_Real diagonal; HYPRE_Real alfa = 1.0; HYPRE_Real beta = 1.0; HYPRE_Int j_start; HYPRE_Int j_end; HYPRE_Int i,i1; HYPRE_Int j,j1; HYPRE_Int k,k1,k2,k3; HYPRE_BigInt big_k1; HYPRE_Int pass_array_size; HYPRE_BigInt global_pass_array_size; HYPRE_BigInt local_pass_array_size; HYPRE_Int my_id, num_procs; HYPRE_Int index, start; HYPRE_BigInt my_first_cpt; HYPRE_BigInt total_global_cpts; HYPRE_Int p_cnt; HYPRE_Int total_nz_offd; HYPRE_Int cnt_nz_offd; HYPRE_Int cnt_offd, cnt_new; HYPRE_Int no_break; HYPRE_Int not_found; HYPRE_Int Pext_send_size; HYPRE_Int Pext_recv_size; HYPRE_Int old_Pext_send_size; HYPRE_Int old_Pext_recv_size; HYPRE_Int P_offd_size = 0; HYPRE_Int local_index = -1; HYPRE_Int new_num_cols_offd = 0; HYPRE_Int num_cols_offd_P; /* Threading variables */ HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop; HYPRE_Int pass_length; HYPRE_Int *tmp_marker, *tmp_marker_offd; HYPRE_Int *tmp_array, *tmp_array_offd; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cnt_nz_per_thread; HYPRE_Int * cnt_nz_offd_per_thread; /* HYPRE_Real wall_time; wall_time = hypre_MPI_Wtime(); */ /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for(i=0; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] = 0; cnt_nz_per_thread[i] = 0; } /*----------------------------------------------------------------------- * Access the CSR vectors for A and S. Also get size of fine grid. 
*-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; /* total_global_cpts = 0; */ if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } } //col_map_offd = col_map_offd_A; num_cols_offd = num_cols_offd_A; if (num_cols_offd_A) { A_offd_data = hypre_CSRMatrixData(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); } if (num_cols_offd) S_offd_j = hypre_CSRMatrixJ(S_offd); n_fine = hypre_CSRMatrixNumRows(A_diag); /*----------------------------------------------------------------------- * Initialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); n_coarse = 0; n_SF = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_fine; i++) if (CF_marker[i] == 1) n_coarse++; else if (CF_marker[i] == -3) n_SF++; pass_array_size = n_fine-n_coarse-n_SF; if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size, HYPRE_MEMORY_HOST); pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1, HYPRE_MEMORY_HOST); if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); if (send_map_start[num_sends]) { int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_start[num_sends], HYPRE_MEMORY_HOST); } } index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) int_buf_data[index++] = CF_marker[send_map_elmt[j]]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (num_functions > 1) { index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) int_buf_data[index++] = dof_func[send_map_elmt[j]]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } n_coarse_offd = 0; n_SF_offd = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd; i++) if (CF_marker_offd[i] == 1) n_coarse_offd++; else if 
(CF_marker_offd[i] == -3) n_SF_offd++; if (num_cols_offd) { assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------------- * First Pass: determine the maximal size of P, and elementsPerRow[i]. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Assigned points are points for which we know an interpolation * formula already, and which are thus available to interpolate from. * assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending * on which pass their interpolation formula is determined. * * pass_array contains the points ordered according to their pass, i.e. * | C-points | points of pass 1 | points of pass 2 | .... * C_points are points 0 through pass_pointer[1]-1, * points of pass k (0 < k < num_passes) are contained in points * pass_pointer[k] through pass_pointer[k+1]-1 of pass_array. * * pass_array is also used to avoid going through all points for each pass, * i.e. at the beginning it contains all points in descending order starting * with n_fine-1. Then starting from the last point, we evaluate whether * it is a C_point (pass 0). If it is, the point is brought to the front * and the length of the points to be searched is shortened. This is * done until the parameter cnt (which determines the first point of * pass_array to be searched) becomes n_fine. Then all points have been * assigned a pass number. *-----------------------------------------------------------------------*/ cnt = 0; p_cnt = pass_array_size-1; P_diag_i[0] = 0; P_offd_i[0] = 0; for (i = 0; i < n_fine; i++) { if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt; /* this C point is assigned index coarse_counter on coarse grid, and in column of P */ C_array[cnt++] = i; assigned[i] = 0; P_diag_i[i+1] = 1; /* one element in row i1 of P */ P_offd_i[i+1] = 0; } else if (CF_marker[i] == -1) { pass_array[p_cnt--] = i; P_diag_i[i+1] = 0; P_offd_i[i+1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } else { P_diag_i[i+1] = 0; P_offd_i[i+1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } } index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { big_buf_data[index] = (HYPRE_BigInt)fine_to_coarse[send_map_elmt[j]]; if (big_buf_data[index] > -1) big_buf_data[index] += my_first_cpt; index++; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } new_recv_vec_start = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); if (n_coarse_offd) C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); cnt = 0; new_recv_vec_start[0] = 0; for (j = 0; j < num_recvs; j++) { for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++) { if (CF_marker_offd[i] == 1) { map_S_to_new[i] = cnt; C_array_offd[cnt] = i; new_col_map_offd[cnt++] = fine_to_coarse_offd[i]; assigned_offd[i] = 0; } else { assigned_offd[i] = -1; map_S_to_new[i] = -1; } } new_recv_vec_start[j+1] = cnt; } cnt = 0; hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Mark all local neighbors 
of C points as 'assigned'. *-----------------------------------------------------------------------*/ pass_pointer[0] = 0; pass_pointer[1] = 0; total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */ total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */ cnt = 0; cnt_offd = 0; cnt_nz = 0; cnt_nz_offd = 0; for (i = pass_array_size-1; i > cnt-1; i--) { i1 = pass_array[i]; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_i[i1+1]++; cnt_nz++; assigned[i1] = 1; } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_i[i1+1]++; cnt_nz_offd++; assigned[i1] = 1; } } if (assigned[i1] == 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; } } pass_pointer[2] = cnt; /*----------------------------------------------------------------------- * All local neighbors are assigned, now need to exchange the boundary * info for assigned strong neighbors. *-----------------------------------------------------------------------*/ index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*----------------------------------------------------------------------- * Now we need to determine strong neighbors of points of pass 1, etc. * we need to update assigned_offd after each pass *-----------------------------------------------------------------------*/ pass = 2; local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt); hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); while (global_pass_array_size && pass < max_num_passes) { for (i = pass_array_size-1; i > cnt-1; i--) { i1 = pass_array[i]; no_break = 1; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; no_break = 0; break; } } if (no_break) { for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; break; } } } } /*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/ pass++; pass_pointer[pass] = cnt; local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt); hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); num_passes = pass; P_diag_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); /* P_diag_pass[i] will contain all column numbers for points of pass i */ P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST); P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /* P_diag_start[i] contains pointer to begin of column numbers in P_pass for point i, P_diag_i[i+1] contains number of columns for 
point i */ P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_procs > 1) { P_offd_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); if (cnt_nz_offd) P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST); else P_offd_pass[1] = NULL; new_elmts = hypre_CTAlloc(HYPRE_BigInt*, num_passes, HYPRE_MEMORY_HOST); new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1, HYPRE_MEMORY_HOST); new_counter[0] = 0; new_counter[1] = n_coarse_offd; new_num_cols_offd = n_coarse_offd; new_elmts[0] = new_col_map_offd; } /*----------------------------------------------------------------------- * Pass 1: now we consider points of pass 1, with strong C_neighbors, *-----------------------------------------------------------------------*/ cnt_nz = 0; cnt_nz_offd = 0; /* JBS: Possible candidate for threading */ for (i=pass_pointer[1]; i < pass_pointer[2]; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; } } } total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; if (num_procs > 1) { tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1, HYPRE_MEMORY_HOST); if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (send_map_start[num_sends]) P_ncols = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd+1; i++) { Pext_i[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < send_map_start[num_sends]; i++) { P_ncols[i] = 0; } } old_Pext_send_size = 0; old_Pext_recv_size = 0; for (pass=2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); Pext_send_size = 0; Pext_send_map_start[pass][0] = 0; for (i=0; i < num_sends; i++) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE #endif for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1]; Pext_send_size += P_ncols[j]; } } Pext_send_map_start[pass][i+1] = Pext_send_size; } comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg, P_ncols, &Pext_i[1]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST); Pext_send_buffer = hypre_CTAlloc(HYPRE_BigInt, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; } cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { 
Pext_send_buffer[cnt_offd++] = my_first_cpt + (HYPRE_BigInt) P_diag_pass[pass-1][k]; } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; k3 = 0; while (k3 < pass-1) { if (k1 < new_counter[k3+1]) { k2 = k1-new_counter[k3]; Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2]; break; } k3++; } } } } } if (num_procs > 1) { Pext_recv_size = 0; Pext_recv_vec_start[pass][0] = 0; cnt_offd = 0; for (i=0; i < num_recvs; i++) { for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++) { if (assigned_offd[j] == pass-1) { Pext_start[j] = cnt_offd; cnt_offd += Pext_i[j+1]; } } Pext_recv_size = cnt_offd; Pext_recv_vec_start[pass][i+1] = Pext_recv_size; } hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; if (Pext_recv_size) { Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST); new_elmts[pass-1] = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST); } else { Pext_pass[pass] = NULL; new_elmts[pass-1] = NULL; } if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(loc, HYPRE_MEMORY_HOST); loc = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST); hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST); big_temp_pass = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (21, tmp_comm_pkg, Pext_send_buffer, big_temp_pass); hypre_ParCSRCommHandleDestroy(comm_handle); } cnt_new = 0; cnt_offd = 0; /* JBS: Possible candidate for threading */ for (i=0; i < num_recvs; i++) { for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++) { if (assigned_offd[j] == pass-1) { for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++) { big_k1 = big_temp_pass[j1]; k2 = (HYPRE_Int)(big_k1 - my_first_cpt); if (k2 > -1 && k2 < n_coarse) { Pext_pass[pass][j1] = -k2-1; } else { not_found = 1; k3 = 0; while (k3 < pass-1 && not_found) { k2 = hypre_BigBinarySearch(new_elmts[k3], big_k1, (new_counter[k3+1]-new_counter[k3])); if (k2 > -1) { Pext_pass[pass][j1] = k2 + new_counter[k3]; not_found = 0; } else { k3++; } } if (not_found) { new_elmts[pass-1][cnt_new] = big_k1; loc[cnt_new++] = j1; } } } cnt_offd += Pext_i[j+1]; } } } if (cnt_new) { hypre_BigQsortbi(new_elmts[pass-1],loc,0,cnt_new-1); cnt = 0; local_index = new_counter[pass-1]; Pext_pass[pass][loc[0]] = local_index; for (i=1; i < cnt_new; i++) { if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt]) { new_elmts[pass-1][++cnt] = new_elmts[pass-1][i]; local_index++; } Pext_pass[pass][loc[i]] = local_index; } new_counter[pass] = local_index+1; } else if (num_procs > 1) new_counter[pass] = new_counter[pass-1]; if (new_num_cols_offd < local_index+1) { new_num_cols_offd = local_index+1; } pass_length = pass_pointer[pass+1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd) #endif { /* Thread by computing the sparsity structure for this pass only over * each thread's range of rows. Rows are divided up evenly amongst * the threads. 
The necessary thread-wise temporary arrays, like * P_marker, are initialized and de-allocated internally to the * parallel region. */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_length; } else { thread_stop = (pass_length/num_threads)*(my_thread_num+1); } thread_start += pass_pointer[pass]; thread_stop += pass_pointer[pass]; /* Local initializations */ cnt_nz = 0; cnt_nz_offd = 0; /* This block of code is to go to the top of the parallel region starting before * the loop over num_passes. */ P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); /* marks points to see if they're counted */ for (i=0; i < n_coarse; i++) { P_marker[i] = -1; } if (new_num_cols_offd == local_index+1) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < new_num_cols_offd; i++) { P_marker_offd[i] = -1; } } else if (n_coarse_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < n_coarse_offd; i++) { P_marker_offd[i] = -1; } } /* Need some variables to store each threads cnt_nz and cnt_nz_offd, and * then stitch things together as in par_interp.c * This loop writes * P_diag_i, P_offd_i: data parallel here, and require no special treatment * P_diag_start, P_offd_start: are not data parallel, require special treatment */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_diag_pass[pass-1][k]; if (P_marker[k1] != i1) { cnt_nz++; P_diag_i[i1+1]++; P_marker[k1] = i1; } } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1+1]++; P_marker_offd[k1] = i1; } } } } j_start = 0; for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1-1] != i1) { cnt_nz++; P_diag_i[i1+1]++; P_marker[-k1-1] = i1; } } else if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1+1]++; P_marker_offd[k1] = i1; } } } } } /* Update P_diag_start, P_offd_start with cumulative * nonzero counts over all threads */ if(my_thread_num == 0) { max_num_threads[0] = num_threads; } cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd; cnt_nz_per_thread[my_thread_num] = cnt_nz; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num == 0) { for(i = 1; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1]; cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num > 0) { /* update this thread's section of P_diag_start and P_offd_start * with the num of nz's counted by previous threads */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1]; P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1]; } } else /* if my_thread_num == 0 */ { /* Grab the nz count for all threads */ cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1]; cnt_nz_offd = 
cnt_nz_offd_per_thread[max_num_threads[0]-1]; /* Updated total nz count */ total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; /* Allocate P_diag_pass and P_offd_pass for all threads */ P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST); if (cnt_nz_offd) P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST); else if (num_procs > 1) P_offd_pass[pass] = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* offset cnt_nz and cnt_nz_offd to point to the starting * point in P_diag_pass and P_offd_pass for each thread */ if(my_thread_num > 0) { cnt_nz = cnt_nz_per_thread[my_thread_num-1]; cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1]; } else { cnt_nz = 0; cnt_nz_offd = 0; } /* Set P_diag_pass and P_offd_pass */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_diag_pass[pass-1][k]; if (P_marker[k1] != -i1-1) { P_diag_pass[pass][cnt_nz++] = k1; P_marker[k1] = -i1-1; } } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; if (P_marker_offd[k1] != -i1-1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1-1; } } } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1-1] != -i1-1) { P_diag_pass[pass][cnt_nz++] = -k1-1; P_marker[-k1-1] = -i1-1; } } else if (P_marker_offd[k1] != -i1-1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1-1; } } } } } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) ) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End parallel region */ } hypre_TFree(loc, HYPRE_MEMORY_HOST); hypre_TFree(P_ncols, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST); hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST); hypre_TFree(new_recv_vec_start, HYPRE_MEMORY_HOST); hypre_TFree(cnt_nz_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(cnt_nz_offd_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, total_nz, HYPRE_MEMORY_HOST); if (total_nz_offd) { P_offd_j = hypre_CTAlloc(HYPRE_Int, total_nz_offd, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, total_nz_offd, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) { P_diag_i[i+1] += P_diag_i[i]; P_offd_i[i+1] += P_offd_i[i]; } /* determine P for coarse points */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_coarse; i++) { i1 = C_array[i]; P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1]; P_diag_data[P_diag_i[i1]] = 1.0; } if (weight_option) /*if this is set, weights are separated into negative and positive offdiagonals and accumulated accordingly */ { pass_length = pass_pointer[2]-pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. 
Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i=0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) P_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); } /* determine P for points of pass 1, i.e. neighbors of coarse points */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_pos = 0; sum_C_neg = 0; sum_N_pos = 0; sum_N_neg = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; P_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) sum_N_neg += A_diag_data[j]; else sum_N_pos += A_diag_data[j]; } if (j1 != -1 && P_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; if (A_diag_data[j] < 0) sum_C_neg += A_diag_data[j]; else sum_C_pos += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; P_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) { if (A_offd_data[j] < 0) sum_N_neg += A_offd_data[j]; else sum_N_pos += A_offd_data[j]; } if (j1 != -1 && P_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; if (A_offd_data[j] < 0) sum_C_neg += A_offd_data[j]; else sum_C_pos += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal); if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal); for (j=P_diag_i[i1]; j < cnt; j++) if (P_diag_data[j] < 0) P_diag_data[j] *= alfa; else P_diag_data[j] *= beta; for (j=P_offd_i[i1]; j < cnt_offd; j++) if (P_offd_data[j] < 0) P_offd_data[j] *= alfa; else P_offd_data[j] *= beta; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_cols_offd) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End Parallel Region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST); hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST); for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) 
{ j_start = P_diag_i[j1]; j_end = P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST); } pass_length = pass_pointer[pass+1]-pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i=0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) P_marker_offd[i] = -1; } C_array = NULL; C_array_offd = NULL; if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } if (new_num_cols_offd > n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); } /* Loop over each thread's row-range */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_neg = 0; sum_C_pos = 0; sum_N_neg = 0; sum_N_pos = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; cnt = P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; C_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; C_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) P_marker[j1] = i1; } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) P_marker_offd[j1] = i1; } for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (P_marker[j1] == i1) { for 
(k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j]*P_diag_data[k]; P_diag_data[C_array[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j]*P_offd_data[k]; P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) sum_N_neg += A_diag_data[j]; else sum_N_pos += A_diag_data[j]; } } } for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { j1 = A_offd_j[j]; if (j1 > -1 && P_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j]*Pext_data[k]; if (k1 < 0) P_diag_data[C_array[-k1-1]] += alfa; else P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { if ( A_offd_data[j] < 0) sum_N_neg += A_offd_data[j]; else sum_N_pos += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal); if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal); for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++) if (P_diag_data[j] < 0) P_diag_data[j] *= alfa; else P_diag_data[j] *= beta; for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++) if (P_offd_data[j] < 0) P_offd_data[j] *= alfa; else P_offd_data[j] *= beta; } hypre_TFree(C_array, HYPRE_MEMORY_HOST); hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_cols_offd) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST); } } /* End num_passes for-loop */ } else /* no distinction between positive and negative offdiagonal element */ { pass_length = pass_pointer[2]-pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i=0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); } /* determine P for points of pass 1, i.e. 
neighbors of coarse points */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; tmp_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) sum_N += A_diag_data[j]; if (j1 != -1 && tmp_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; sum_C += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; tmp_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) sum_N += A_offd_data[j]; if (j1 != -1 && tmp_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; sum_C += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C*diagonal != 0) alfa = -sum_N/(sum_C*diagonal); for (j=P_diag_i[i1]; j < cnt; j++) P_diag_data[j] *= alfa; for (j=P_offd_i[i1]; j < cnt_offd; j++) P_offd_data[j] *= alfa; } hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST); hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST); } /* end OMP parallel region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST); hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST); for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST); } pass_length = pass_pointer[pass+1]-pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel 
private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } tmp_array = NULL; if (n_coarse) { tmp_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } tmp_array_offd = NULL; if (new_num_cols_offd > n_coarse_offd) { tmp_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else { tmp_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);} for (i=0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i=0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); } for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; cnt = P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; tmp_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; tmp_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) tmp_marker[j1] = i1; } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) tmp_marker_offd[j1] = i1; } for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (tmp_marker[j1] == i1) { for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j]*P_diag_data[k]; P_diag_data[tmp_array[k1]] += alfa; sum_C += alfa; sum_N += alfa; } for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j]*P_offd_data[k]; P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) sum_N += A_diag_data[j]; } } for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { j1 = A_offd_j[j]; if (j1 > -1 && tmp_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j]*Pext_data[k]; if (k1 < 0) P_diag_data[tmp_array[-k1-1]] += alfa; else P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) sum_N += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C*diagonal != 0.0) alfa = -sum_N/(sum_C*diagonal); for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++) 
               P_diag_data[j] *= alfa;
            for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
               P_offd_data[j] *= alfa;
         }
         hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
         hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
         hypre_TFree(tmp_array, HYPRE_MEMORY_HOST);
         hypre_TFree(tmp_array_offd, HYPRE_MEMORY_HOST);
      } /* End OMP Parallel Section */
      hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
      if (num_procs > 1)
      {
         hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
         hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
      }
      } /* End num_passes for-loop */
   } /* End else: no distinction between positive and negative elements */

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_send_map_start, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_recv_vec_start, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
   hypre_TFree(P_diag_pass, HYPRE_MEMORY_HOST);
   hypre_TFree(P_offd_pass, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_pass, HYPRE_MEMORY_HOST);
   hypre_TFree(P_diag_start, HYPRE_MEMORY_HOST);
   hypre_TFree(P_offd_start, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_start, HYPRE_MEMORY_HOST);
   hypre_TFree(Pext_i, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(assigned, HYPRE_MEMORY_HOST);
   hypre_TFree(assigned_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(pass_pointer, HYPRE_MEMORY_HOST);
   hypre_TFree(pass_array, HYPRE_MEMORY_HOST);
   hypre_TFree(map_S_to_new, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* Compress P, removing coefficients smaller than trunc_factor * Max
      and/or keeping at most <P_max_elmts> absolutely maximal coefficients
      per row */
   if (trunc_factor != 0.0 || P_max_elmts != 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
   }

   P_offd_size = P_offd_i[n_fine];
   num_cols_offd_P = 0;
   if (P_offd_size)
   {
      if (new_num_cols_offd > num_cols_offd)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
      }
      else
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < new_num_cols_offd; i++)
      {
         P_marker_offd[i] = 0;
      }
      num_cols_offd_P = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker_offd[index])
         {
            num_cols_offd_P++;
            P_marker_offd[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P, HYPRE_MEMORY_HOST);
      permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
      big_permute = hypre_CTAlloc(HYPRE_BigInt, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < new_counter[num_passes-1]; i++)
         big_permute[i] = -1;
      cnt = 0;
      for (i=0; i < num_passes-1; i++)
      {
         for (j=new_counter[i]; j < new_counter[i+1]; j++)
         {
            if (P_marker_offd[j])
            {
               col_map_offd_P[cnt] =
new_elmts[i][j-(HYPRE_BigInt)new_counter[i]]; big_permute[j] = col_map_offd_P[cnt++]; } } } hypre_BigQsort0(col_map_offd_P,0,num_cols_offd_P-1); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE #endif for (i=0; i < new_counter[num_passes-1]; i++) { big_k1 = big_permute[i]; if (big_k1 != -1) permute[i] = hypre_BigBinarySearch(col_map_offd_P,big_k1,num_cols_offd_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) { P_offd_j[i] = permute[P_offd_j[i]]; } hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { for (i=0; i < num_passes-1; i++) hypre_TFree(new_elmts[i], HYPRE_MEMORY_HOST); } hypre_TFree(permute, HYPRE_MEMORY_HOST); hypre_TFree(big_permute, HYPRE_MEMORY_HOST); hypre_TFree(new_elmts, HYPRE_MEMORY_HOST); hypre_TFree(new_counter, HYPRE_MEMORY_HOST); if (num_cols_offd_P) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P; } if (n_SF) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; } if (num_procs > 1) { hypre_MatvecCommPkgCreate(P); } *P_ptr = P; /* wall_time = hypre_MPI_Wtime() - wall_time; hypre_printf("TOTAL TIME %1.2e \n",wall_time); */ /*----------------------------------------------------------------------- * Build and return dof_func array for coarse grid. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Free mapping vector and marker array. *-----------------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime(); #endif return(0); } HYPRE_Int hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, hypre_ParCSRMatrix **P_ptr ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("MultipassInterp"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_ParCSRMatrixMemoryLocation(S) ); if (exec == HYPRE_EXEC_DEVICE) { /* Notice: call the mod version on GPUs */ ierr = hypre_BoomerAMGBuildModMultipassDevice( A, CF_marker, S, num_cpts_global, trunc_factor, P_max_elmts, 9, num_functions, dof_func, P_ptr ); } else #endif { ierr = hypre_BoomerAMGBuildMultipassHost( A, CF_marker, S, num_cpts_global, num_functions, dof_func, debug_flag, trunc_factor, P_max_elmts, weight_option, P_ptr ); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; }
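
/*
   Usage sketch (not part of hypre, guarded out of compilation): one
   plausible way a BoomerAMG setup phase could call the dispatcher above.
   The surrounding names (`A`, `S`, `CF_marker`, `num_cpts_global`, `P`)
   are hypothetical and would come from earlier strength-of-connection and
   coarsening steps; trunc_factor and P_max_elmts values are illustrative.
   weight_option chooses between the two weighting schemes implemented in
   hypre_BoomerAMGBuildMultipassHost (separate positive/negative sums
   versus a single sum).
*/
#if 0
static HYPRE_Int
example_build_multipass_interp( hypre_ParCSRMatrix  *A,
                                HYPRE_Int           *CF_marker,
                                hypre_ParCSRMatrix  *S,
                                HYPRE_BigInt        *num_cpts_global,
                                hypre_ParCSRMatrix **P )
{
   /* scalar problem: one function, no dof_func array, no debug output */
   return hypre_BoomerAMGBuildMultipass( A, CF_marker, S, num_cpts_global,
                                         1,    /* num_functions */
                                         NULL, /* dof_func */
                                         0,    /* debug_flag */
                                         0.2,  /* trunc_factor */
                                         4,    /* P_max_elmts */
                                         1,    /* weight_option */
                                         P );
}
#endif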
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/hashmap.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #elif defined(MAGICKCORE_HAVE_LCMS2_H) #include <wchar.h> #include "lcms2.h" #elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H) #include <lcms/lcms.h> #else #include "lcms.h" #endif #endif /* Define declarations. 
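  When building against the legacy lcms (v1) library, the block below maps
  the lcms2 names used throughout this file onto their lcms1 equivalents
  (icSig* signatures, DWORD, and the non-THR entry points), so the rest of
  the code can target the lcms2 API unconditionally.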
*/
#if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000)
#define cmsSigCmykData icSigCmykData
#define cmsSigGrayData icSigGrayData
#define cmsSigLabData icSigLabData
#define cmsSigLuvData icSigLuvData
#define cmsSigRgbData icSigRgbData
#define cmsSigXYZData icSigXYZData
#define cmsSigYCbCrData icSigYCbCrData
#define cmsSigLinkClass icSigLinkClass
#define cmsColorSpaceSignature icColorSpaceSignature
#define cmsUInt32Number DWORD
#define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler)
#define cmsCreateTransformTHR(context,source_profile,source_type, \
  target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \
  source_type,target_profile,target_type,intent,flags)
#define cmsOpenProfileFromMemTHR(context,profile,length) \
  cmsOpenProfileFromMem(profile,length)
#endif

/*
  Forward declarations
*/
static MagickBooleanType
  SetImageProfileInternal(Image *,const char *,const StringInfo *,
    const MagickBooleanType,ExceptionInfo *);

static void
  WriteTo8BimProfile(Image *,const char *,const StringInfo *);

/*
  Typedef declarations
*/
struct _ProfileInfo
{
  char
    *name;

  size_t
    length;

  unsigned char
    *info;

  size_t
    signature;
};

typedef struct _CMSExceptionInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} CMSExceptionInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e P r o f i l e s                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
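%
%  A minimal usage sketch (image is assumed to be an existing Image *):
%
%      (void) DeleteImageProfile(image,"iptc");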
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,
  const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyImageProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  char
    key[MaxTextExtent];

  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  (void) CopyMagickString(key,name,MaxTextExtent);
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,key);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P r o f i l e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise it is added or applied.
%  Use a name of '*' and a profile of NULL to remove all profiles from the
%  image.
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profiles is applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_LCMS_DELEGATE)

static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  register ssize_t
    i;

  assert(pixels != (unsigned short **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (unsigned short *) NULL)
      pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
  pixels=(unsigned short **) RelinquishMagickMemory(pixels);
  return(pixels);
}

static unsigned short **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  register ssize_t
    i;

  unsigned short
    **pixels;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (unsigned short **) NULL)
    return((unsigned short **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (unsigned short *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
      source_type,target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif

#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000)
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;
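
  /*
    The context pointer handed back by lcms is the one supplied to the
    *THR entry points; it is interpreted here as the CMSExceptionInfo
    bundle so that lcms warnings surface through the Magick exception.
  */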
cms_exception=(CMSExceptionInfo *) context; image=cms_exception->image; exception=cms_exception->exception; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); return; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'",image->filename); } #else static int CMSExceptionHandler(int severity,const char *message) { (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s", severity,message != (char *) NULL ? message : "no message"); return(1); } #endif #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 
0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 
0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 
0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 
0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (GetImageProfile(image,"icm") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icm",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #define ThrowProfileException(severity,tag,context) \ { \ if (source_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_profile); \ if (target_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name 
!= (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s).
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add an ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /*
            Future.
            value=GetImageProperty(image,"exif:InteroperabilityIndex",
              exception);
            if (LocaleCompare(value,"R03.") != 0)
              (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
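          One transform is created per potential worker thread (see
          AcquireTransformThreadSet above) so that each row-processing
          thread below can use its own cmsHTRANSFORM handle.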
*/ cmsSetLogErrorHandler(CMSExceptionHandler); cms_exception.image=image; cms_exception.exception=exception; (void) cms_exception; source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; ColorspaceType source_colorspace, target_colorspace; cmsColorSpaceSignature signature; cmsHPROFILE target_profile; cmsHTRANSFORM *restrict transform; cmsUInt32Number flags, source_type, target_type; int intent; MagickBooleanType status; MagickOffsetType progress; size_t source_channels, target_channels; ssize_t y; unsigned short **restrict source_pixels, **restrict target_pixels; target_profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_profile=source_profile; source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,GetStringInfoDatum(icc_profile), (cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } switch (cmsGetColorSpace(source_profile)) { case cmsSigCmykData: { source_colorspace=CMYKColorspace; source_type=(cmsUInt32Number) TYPE_CMYK_16; source_channels=4; break; } case cmsSigGrayData: { source_colorspace=GRAYColorspace; source_type=(cmsUInt32Number) TYPE_GRAY_16; source_channels=1; break; } case cmsSigLabData: { source_colorspace=LabColorspace; source_type=(cmsUInt32Number) TYPE_Lab_16; source_channels=3; break; } case cmsSigLuvData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YUV_16; source_channels=3; break; } case cmsSigRgbData: { source_colorspace=sRGBColorspace; source_type=(cmsUInt32Number) TYPE_RGB_16; source_channels=3; break; } case cmsSigXYZData: { source_colorspace=XYZColorspace; source_type=(cmsUInt32Number) TYPE_XYZ_16; source_channels=3; break; } case cmsSigYCbCrData: { source_colorspace=YCbCrColorspace; source_type=(cmsUInt32Number) TYPE_YCbCr_16; source_channels=3; break; } default: { source_colorspace=UndefinedColorspace; source_type=(cmsUInt32Number) TYPE_RGB_16; source_channels=3; break; } } signature=cmsGetPCS(source_profile); if (target_profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_profile); switch (signature) { case cmsSigCmykData: { target_colorspace=CMYKColorspace; target_type=(cmsUInt32Number) TYPE_CMYK_16; target_channels=4; break; } case cmsSigLabData: { target_colorspace=LabColorspace; target_type=(cmsUInt32Number) TYPE_Lab_16; target_channels=3; break; } case cmsSigGrayData: { target_colorspace=GRAYColorspace; target_type=(cmsUInt32Number) TYPE_GRAY_16; target_channels=1; break; } case cmsSigLuvData: { target_colorspace=YUVColorspace; target_type=(cmsUInt32Number) TYPE_YUV_16; target_channels=3; break; } case cmsSigRgbData: { target_colorspace=sRGBColorspace; target_type=(cmsUInt32Number) TYPE_RGB_16; target_channels=3; break; } case cmsSigXYZData: { target_colorspace=XYZColorspace; target_type=(cmsUInt32Number) TYPE_XYZ_16; target_channels=3; break; } case cmsSigYCbCrData: { target_colorspace=YCbCrColorspace; target_type=(cmsUInt32Number) TYPE_YCbCr_16; target_channels=3; break; } default: { target_colorspace=UndefinedColorspace; target_type=(cmsUInt32Number) TYPE_RGB_16; 
target_channels=3; break; } } if ((source_colorspace == UndefinedColorspace) || (target_colorspace == UndefinedColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == GRAYColorspace) && (IsImageGray(image,exception) == MagickFalse)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == CMYKColorspace) && (image->colorspace != CMYKColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == XYZColorspace) && (image->colorspace != XYZColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == YCbCrColorspace) && (image->colorspace != YCbCrColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace != CMYKColorspace) && (source_colorspace != LabColorspace) && (source_colorspace != XYZColorspace) && (source_colorspace != YCbCrColorspace) && (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); switch (image->rendering_intent) { case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break; case PerceptualIntent: intent=INTENT_PERCEPTUAL; break; case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break; case SaturationIntent: intent=INTENT_SATURATION; break; default: intent=INTENT_PERCEPTUAL; break; } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(image,source_profile, source_type,target_profile,target_type,intent,flags); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
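           Each worker packs one row of pixels into a per-thread 16-bit
           scratch buffer, runs it through that thread's lcms transform, and
           writes the converted samples back through the cache view;
           source_channels/target_channels determine how many shorts are
           exchanged per pixel.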
*/ source_pixels=AcquirePixelThreadSet(image->columns,source_channels); target_pixels=AcquirePixelThreadSet(image->columns,target_channels); if ((source_pixels == (unsigned short **) NULL) || (target_pixels == (unsigned short **) NULL)) { transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (source_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); return(MagickFalse); } if (target_colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_colorspace,exception); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register ssize_t x; register Quantum *restrict q; register unsigned short *p; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } p=source_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=ScaleQuantumToShort(GetPixelRed(image,q)); if (source_channels > 1) { *p++=ScaleQuantumToShort(GetPixelGreen(image,q)); *p++=ScaleQuantumToShort(GetPixelBlue(image,q)); } if (source_channels > 3) *p++=ScaleQuantumToShort(GetPixelBlack(image,q)); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_pixels[id],target_pixels[id], (unsigned int) image->columns); p=target_pixels[id]; q-=image->columns*GetPixelChannels(image); for (x=0; x < (ssize_t) image->columns; x++) { if (target_channels == 1) SetPixelGray(image,ScaleShortToQuantum(*p),q); else SetPixelRed(image,ScaleShortToQuantum(*p),q); p++; if (target_channels > 1) { SetPixelGreen(image,ScaleShortToQuantum(*p),q); p++; SetPixelBlue(image,ScaleShortToQuantum(*p),q); p++; } if (target_channels > 3) { SetPixelBlack(image,ScaleShortToQuantum(*p),q); p++; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ProfileImage) #endif proceed=SetImageProgress(image,ProfileImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait != BlendPixelTrait ? TrueColorType : TrueColorMatteType; break; } case cmsSigCmykData: { image->type=image->alpha_trait != BlendPixelTrait ? ColorSeparationType : ColorSeparationMatteType; break; } case cmsSigGrayData: { image->type=image->alpha_trait != BlendPixelTrait ? 
GrayscaleType : GrayscaleMatteType; break; } default: break; } target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass) status=SetImageProfile(image,name,profile,exception); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); } (void) cmsCloseProfile(source_profile); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. % % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. 
% */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(size_t) (*p++ << 24); *quantum|=(size_t) (*p++ << 16); *quantum|=(size_t) (*p++ << 8); *quantum|=(size_t) (*p++ << 0); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++ << 8); *quantum|=(unsigned short) (*p++ << 0); return(p); }static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) CopyMagickMemory(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; register const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_count; StringInfo *extract_profile; extract_count=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) CopyMagickMemory(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_count=profile->length; if ((extract_count & 0x01) != 0) extract_count++; extract_profile=AcquireStringInfo(offset+extract_count+extent); (void) CopyMagickMemory(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4, (unsigned int)profile->length); (void) CopyMagickMemory(extract_profile->datum+offset, profile->datum,profile->length); } (void) CopyMagickMemory(extract_profile->datum+offset+extract_count, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; register const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if 
(LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MaxTextExtent], property[MaxTextExtent]; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MaxTextExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),CloneStringInfo(profile)); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,profile); } /* Inject profile into image properties. 
*/ (void) FormatLocaleString(property,MaxTextExtent,"%s:*",name); (void) GetImageProperty(image,property,exception); return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline unsigned short ReadProfileShort(const EndianType endian, unsigned char *buffer) { unsigned short value; if (endian == LSBEndian) { value=(unsigned short) ((buffer[1] << 8) | buffer[0]); return((unsigned short) (value & 0xffff)); } value=(unsigned short) ((((unsigned char *) buffer)[0] << 8) | ((unsigned char *) buffer)[1]); return((unsigned short) (value & 0xffff)); } static inline size_t ReadProfileLong(const EndianType endian, unsigned char *buffer) { size_t value; if (endian == LSBEndian) { value=(size_t) ((buffer[3] << 24) | (buffer[2] << 16) | (buffer[1] << 8 ) | (buffer[0])); return((size_t) (value & 0xffffffff)); } value=(size_t) ((buffer[0] << 24) | (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]); return((size_t) (value & 0xffffffff)); } static inline size_t ReadProfileMSBLong(unsigned char **p, size_t *length) { size_t value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline unsigned short ReadProfileMSBShort(unsigned char **p, size_t *length) { unsigned short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) CopyMagickMemory(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) CopyMagickMemory(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) CopyMagickMemory(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) CopyMagickMemory(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t count, length; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while(length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if 
(ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=ReadProfileByte(&p,&length); if (count > length) return(MagickFalse); p+=count; if ((*p & 0x01) == 0) p++; count=ReadProfileMSBLong(&p,&length); if (count > length) return(MagickFalse); if (id == 0x3ED && count == 16) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.x*2.54* 65536.0),p); else WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.x* 65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.y*2.54* 65536.0),p+8); else WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.y* 65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. */ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4)); if ((offset < 0) || (size_t) offset >= length) return(MagickFalse); directory=exif+offset; level=0; entry=0; do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } /* Determine how many entries there are in the current IFD. */ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; register unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format-1) >= EXIF_NUM_FORMATS) break; components=(ssize_t) ((int) ReadProfileLong(endian,q+4)); number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { ssize_t offset; /* The directory entry contains an offset. 
*/ offset=(ssize_t) ((int) ReadProfileLong(endian,q+8)); if ((size_t) (offset+number_bytes) > length) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { ssize_t offset; offset=(ssize_t) ((int) ReadProfileLong(endian,p)); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12* number_entries))); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if (profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); }
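The SyncExifProfile routine above hinges on correctly decoding the 8-byte TIFF header that opens an EXIF payload: the byte-order mark ("II" for Intel/little endian, "MM" for Motorola/big endian), the 0x002A magic, then the 32-bit offset of the first IFD. Here is a minimal standalone sketch of that same check; parse_tiff_header is a hypothetical helper written for illustration, not an ImageMagick function.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: parse the 8-byte TIFF header that starts an EXIF
   payload. Returns 0 on success and stores the IFD0 offset, or -1 if the
   buffer is too short or the byte-order mark is unrecognized. */
static int parse_tiff_header(const uint8_t *p, size_t length, uint32_t *ifd0)
{
  int msb;
  uint16_t magic;
  if (length < 8)
    return(-1);
  if ((p[0] == 'I') && (p[1] == 'I'))
    msb = 0;                        /* "II" => little endian */
  else if ((p[0] == 'M') && (p[1] == 'M'))
    msb = 1;                        /* "MM" => big endian */
  else
    return(-1);
  magic = msb ? (uint16_t) ((p[2] << 8) | p[3]) :
                (uint16_t) ((p[3] << 8) | p[2]);
  if (magic != 0x002a)              /* same TIFF magic checked above */
    return(-1);
  *ifd0 = msb ? (((uint32_t) p[4] << 24) | ((uint32_t) p[5] << 16) |
                 ((uint32_t) p[6] << 8) | p[7])
              : (((uint32_t) p[7] << 24) | ((uint32_t) p[6] << 16) |
                 ((uint32_t) p[5] << 8) | p[4]);
  return(*ifd0 < length ? 0 : -1);  /* offset must stay inside the profile */
}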
example-omp.c
// PWR009: Use OpenMP teams to offload work to GPU // https://www.appentra.com/knowledge/checks/pwr009 void example(double (*A)[100], double (*B)[100], double (*C)[100]) { #pragma omp target map(to: A[0:100][0:100], B[0:100][0:100]) map(tofrom: C[0:100][0:100]) { #pragma omp parallel default(none) shared(A, B, C) { #pragma omp for schedule(auto) for (int i = 0; i < 100; i++) { for (int j = 0; j < 100; j++) { for (int k = 0; k < 100; k++) { C[i][j] += A[i][k] * B[k][j]; } } } } // end parallel } // end target }
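The code above is the pattern PWR009 flags: a `parallel` region inside `target` runs within a single team, so only one GPU compute unit does the work. The remedy the check recommends is to spawn a league of teams and distribute the outer loop across them. A sketch of that transformed kernel follows; it is illustrative only and untested on any particular offloading toolchain.

// Sketch of the PWR009-compliant variant: `teams distribute` creates a
// league of teams and spreads the outer iterations across them, while
// `parallel for` splits each team's share among its threads.
void example_teams(double (*A)[100], double (*B)[100], double (*C)[100]) {
  #pragma omp target teams distribute parallel for \
      map(to: A[0:100][0:100], B[0:100][0:100]) map(tofrom: C[0:100][0:100])
  for (int i = 0; i < 100; i++) {
    for (int j = 0; j < 100; j++) {
      for (int k = 0; k < 100; k++) {
        C[i][j] += A[i][k] * B[k][j];
      }
    }
  }
}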
FFT.h
#pragma once #include <vector> #include <algorithm> #ifdef MULTICORE #include <omp.h> #endif #include <libff/algebra/fields/field_utils.hpp> #include <libfqfft/tools/exceptions.hpp> /** * Let F_p denote a field of prime order p. * The Discrete Fourier Transform (DFT) of a vector 'a[0...(n-1)]' where n = 2^k and a_i \in F_p is defined as: * * a_i = \sum_{j=0}^{n-1} { \omega_n^{i j} \cdot a_j } * * Here, \omega is a primitive nth root of unity in F_p. * Typically, the a_i's are also from the field F_p. * However, in some cases we might want the a_j's to be elements of a group, say an elliptic curve. * In that case, \omega_n^{i j} \cdot a_j is actually a scalar multiplication in the elliptic curve group. * * This is why we use GroupType here to indicate the type of group elements the a_i's are. * Note: We refer to the DFT as FFT in this code. */ namespace libcryptotemplate { using libfqfft::DomainSizeException; /** * A modification of libfqfft's code, which is based on pseudocode from [CLRS 2n Ed, pp. 864]. * This is the non-parallelized version. * Also, note that it's the caller's responsibility to multiply by 1/N when using this for an inverse DFT. */ template<typename GroupT, typename FieldT> void FFT_serial(std::vector<GroupT> &a, const FieldT &omega) { const size_t n = a.size(), logn = libff::log2(n); if (n != (1u << logn)) throw DomainSizeException("expected n == (1u << logn)"); /* swapping in place (from Storer's book) */ for (size_t k = 0; k < n; ++k) { const size_t rk = libff::bitreverse(k, logn); if (k < rk) std::swap(a[k], a[rk]); } size_t m = 1; // invariant: m = 2^{s-1} for (size_t s = 1; s <= logn; ++s) { // w_m is 2^s-th root of unity now const FieldT w_m = omega^(n/(2*m)); asm volatile ("/* pre-inner */"); for (size_t k = 0; k < n; k += 2*m) { FieldT w = FieldT::one(); for (size_t j = 0; j < m; ++j) { const GroupT t = w * a[k+j+m]; a[k+j+m] = a[k+j] - t; a[k+j] = a[k+j] + t; w *= w_m; } } asm volatile ("/* post-inner */"); m *= 2; } } template<typename GroupT, typename FieldT> void FFT(std::vector<GroupT> &a) { size_t n = libff::get_power_of_two(a.size()); FieldT omega = libff::get_root_of_unity<FieldT>(n); #ifdef MULTICORE # error "Did not test the parallel FFT path yet" #else FFT_serial<GroupT, FieldT>(a, omega); #endif } template<typename GroupT, typename FieldT> void invFFT(std::vector<GroupT> &a) { size_t n = libff::get_power_of_two(a.size()); FieldT omega = libff::get_root_of_unity<FieldT>(n); #ifdef MULTICORE # error "Did not test the parallel FFT path yet" #else FFT_serial<GroupT, FieldT>(a, omega.inverse()); #endif const FieldT sconst = FieldT(static_cast<long>(n)).inverse(); for(size_t i = 0; i < n; i++) { a[i] = sconst * a[i]; } } template<typename GroupT, typename FieldT> void FFT_parallel(std::vector<GroupT> &a, const FieldT &omega) { #ifdef MULTICORE const size_t num_cpus = omp_get_max_threads(); #else const size_t num_cpus = 1; #endif const size_t log_cpus = ((num_cpus & (num_cpus - 1)) == 0 ? 
libff::log2(num_cpus) : libff::log2(num_cpus) - 1); if (log_cpus == 0) FFT_serial(a, omega); else FFT_parallel_inner(a, omega, log_cpus); } template<typename GroupT, typename FieldT> void FFT_parallel_inner(std::vector<FieldT> &a, const FieldT &omega, const size_t log_cpus) { const size_t num_cpus = 1ul<<log_cpus; const size_t m = a.size(); const size_t log_m = libff::log2(m); if (m != 1ul<<log_m) throw DomainSizeException("expected m == 1ul<<log_m"); if (log_m < log_cpus) { FFT_serial(a, omega); return; } std::vector<std::vector<FieldT> > tmp(num_cpus); for (size_t j = 0; j < num_cpus; ++j) { tmp[j].resize(1ul<<(log_m-log_cpus), FieldT::zero()); } #ifdef MULTICORE #pragma omp parallel for #endif for (size_t j = 0; j < num_cpus; ++j) { const FieldT omega_j = omega^j; const FieldT omega_step = omega^(j<<(log_m - log_cpus)); FieldT elt = FieldT::one(); for (size_t i = 0; i < 1ul<<(log_m - log_cpus); ++i) { for (size_t s = 0; s < num_cpus; ++s) { // invariant: elt is omega^(j*idx) const size_t idx = (i + (s<<(log_m - log_cpus))) % (1u << log_m); tmp[j][i] += elt * a[idx]; elt *= omega_step; } elt *= omega_j; } } const FieldT omega_num_cpus = omega^num_cpus; #ifdef MULTICORE #pragma omp parallel for #endif for (size_t j = 0; j < num_cpus; ++j) { FFT_serial(tmp[j], omega_num_cpus); } #ifdef MULTICORE #pragma omp parallel for #endif for (size_t i = 0; i < num_cpus; ++i) { for (size_t j = 0; j < 1ul<<(log_m - log_cpus); ++j) { // now: i = idx >> (log_m - log_cpus) and j = idx % (1u << (log_m - log_cpus)), for idx = ((i<<(log_m-log_cpus))+j) % (1u << log_m) a[(j<<log_cpus) + i] = tmp[i][j]; } } } } // end of libcryptotemplate
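One subtlety worth spelling out in FFT_serial above: the opening reordering loop swaps a[k] with a[rk] only when k < rk. This works because bit reversal is an involution (reversing k twice gives back k), so every two-element orbit is visited exactly once and fixed points (k == rk) are skipped. A tiny self-contained illustration of that permutation, using a local stand-in for libff::bitreverse:

#include <cstddef>
#include <cstdio>

// Local stand-in for libff::bitreverse: reverse the low `logn` bits of n.
static size_t bitreverse(size_t n, const size_t logn) {
  size_t r = 0;
  for (size_t k = 0; k < logn; ++k) {
    r = (r << 1) | (n & 1);
    n >>= 1;
  }
  return r;
}

int main() {
  const size_t logn = 3, n = 1u << logn;   // 8-point transform
  int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  // Same reordering loop as FFT_serial: each pair (k, rk) is swapped once.
  for (size_t k = 0; k < n; ++k) {
    const size_t rk = bitreverse(k, logn);
    if (k < rk) { const int t = a[k]; a[k] = a[rk]; a[rk] = t; }
  }
  for (size_t k = 0; k < n; ++k)
    std::printf("%d ", a[k]);              // prints 0 4 2 6 1 5 3 7
  return 0;
}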
cvAdvDiff_bnd_omp.c
/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds and Ting Yan @ SMU * Based on cvAdvDiff_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * LLNS/SMU Copyright Start * Copyright (c) 2017, Southern Methodist University and * Lawrence Livermore National Security * * This work was performed under the auspices of the U.S. Department * of Energy by Southern Methodist University and Lawrence Livermore * National Laboratory under Contract DE-AC52-07NA27344. * Produced at Southern Methodist University and the Lawrence * Livermore National Laboratory. * * All rights reserved. * For details, see the LICENSE file. * LLNS/SMU Copyright End * ----------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * solved using CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the SUNBAND linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value or the number of threads from the * environment value, run without arguments: * % ./cvAdvDiff_bnd_omp * The environment variable can be over-ridden with a command line * argument specifying the number of threads to use, e.g: * % ./cvAdvDiff_bnd_omp 5 * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> /* Header files with a description of contents */ #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. 
*/ #include <nvector/nvector_openmp.h> /* serial N_Vector types, fcts., macros */ #include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */ #include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #include <sundials/sundials_math.h> /* definition of ABS and EXP */ #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants */ #define XMAX RCONST(2.0) /* domain boundaries */ #define YMAX RCONST(1.0) #define MX 10 /* mesh dimensions */ #define MY 5 #define NEQ MX*MY /* number of equations */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* User-defined vector access macro IJth */ /* IJth is defined in order to isolate the translation from the mathematical 2-dimensional structure of the dependent variable vector to the underlying 1-dimensional storage. IJth(vdata,i,j) references the element in the vdata array for u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY. The vdata array is obtained via the macro call vdata = NV_DATA_S(v), where v is an N_Vector. The variables are ordered by the y index j, then by the x index i. */ #define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY]) /* Type : UserData (contains grid constants) */ typedef struct { realtype dx, dy, hdcoef, hacoef, vdcoef; int nthreads; } *UserData; /* Private Helper Functions */ static void SetIC(N_Vector u, UserData data); static void PrintHeader(realtype reltol, realtype abstol, realtype umax); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_retval(void *returnvalue, const char *funcname, int opt); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char *argv[]) { realtype dx, dy, reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNMatrix A; SUNLinearSolver LS; void *cvode_mem; int iout, retval; long int nst; int num_threads; u = NULL; data = NULL; A = NULL; LS = NULL; cvode_mem = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* Overwrite with OMP_NUM_THREADS environment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = strtol(argv[1], NULL, 0); /* Create an OpenMP vector */ u = N_VNew_OpenMP(NEQ, num_threads); /* Allocate u vector */ if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; data = (UserData) malloc(sizeof *data); /* Allocate data memory */ if(check_retval((void *)data, "malloc", 2)) return(1); dx = data->dx = XMAX/(MX+1); /* Set grid coefficients in data */ dy = data->dy = YMAX/(MY+1); data->hdcoef = ONE/(dx*dx); data->hacoef = HALF/(TWO*dx); data->vdcoef = ONE/(dy*dy); data->nthreads = num_threads; SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver 
memory and specify the * Backward Differentiation Formula */ cvode_mem = CVodeCreate(CV_BDF); if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the inital time T0, and * the initial dependent variable vector u. */ retval = CVodeInit(cvode_mem, f, T0, u); if(check_retval(&retval, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ retval = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_retval(&retval, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ retval = CVodeSetUserData(cvode_mem, data); if(check_retval(&retval, "CVodeSetUserData", 1)) return(1); /* Create banded SUNMatrix for use in linear solves -- since this will be factored, set the storage bandwidth to be the sum of upper and lower bandwidths */ A = SUNBandMatrix(NEQ, MY, MY); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); /* Create banded SUNLinearSolver object for use by CVode */ LS = SUNLinSol_Band(u, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */ retval = CVodeSetLinearSolver(cvode_mem, LS, A); if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1); /* Set the user-supplied Jacobian routine Jac */ retval = CVodeSetJacFn(cvode_mem, Jac); if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_retval(&retval, "CVode", 1)) break; umax = N_VMaxNorm(u); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ printf("num_threads = %i\n\n", num_threads); N_VDestroy_OpenMP(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ SUNLinSolFree(LS); /* Free the linear solver memory */ SUNMatDestroy(A); /* Free the matrix memory */ free(data); /* Free the user data */ return(0); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). */ static int f(realtype t, N_Vector u,N_Vector udot, void *user_data) { realtype uij, udn, uup, ult, urt, hordc, horac, verdc, hdiff, hadv, vdiff; realtype *udata, *dudata; int i, j; UserData data; udata = NV_DATA_OMP(u); dudata = NV_DATA_OMP(udot); /* Extract needed constants from data */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; /* Loop over all grid points. */ #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, urt, hdiff, hadv, vdiff) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { /* Extract u at x_i, y_j and four neighboring points */ uij = IJth(udata, i, j); udn = (j == 1) ? ZERO : IJth(udata, i, j-1); uup = (j == MY) ? ZERO : IJth(udata, i, j+1); ult = (i == 1) ? ZERO : IJth(udata, i-1, j); urt = (i == MX) ? 
ZERO : IJth(udata, i+1, j); /* Set diffusion and advection terms and load into udot */ hdiff = hordc*(ult - TWO*uij + urt); hadv = horac*(urt - ult); vdiff = verdc*(uup - TWO*uij + udn); IJth(dudata, i, j) = hdiff + hadv + vdiff; } } return(0); } /* Jacobian routine. Compute J(t,u). */ static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { sunindextype i, j, k; realtype *kthCol, hordc, horac, verdc; UserData data; /* The components of f = udot that depend on u(i,j) are f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2) df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1) df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX) df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1) df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY) */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { k = j-1 + (i-1)*MY; kthCol = SUNBandMatrix_Column(J,k); /* set the kth column of J */ SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc); if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac; if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac; if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc; if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc; } } return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { int i, j; realtype x, y, dx, dy; realtype *udata; /* Extract needed constants from data */ dx = data->dx; dy = data->dy; /* Set pointer to data array in vector u. 
*/ udata = NV_DATA_OMP(u); /* Load initial profile into u vector */ #pragma omp parallel for default(shared) private(j, i, y, x) for (j=1; j <= MY; j++) { y = j*dy; for (i=1; i <= MX; i++) { x = i*dx; IJth(udata,i,j) = x*(XMAX - x)*y*(YMAX - y)*SUNRexp(FIVE*x*y); } } } /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %d X %d\n", MX, MY); printf("Total system size = %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { int retval; long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS; retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); retval = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_retval(&retval, "CVodeGetNumRhsEvals", 1); retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_retval(&retval, "CVodeGetNumLinSolvSetups", 1); retval = CVodeGetNumErrTestFails(cvode_mem, &netf); check_retval(&retval, "CVodeGetNumErrTestFails", 1); retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1); retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1); retval = CVodeGetNumJacEvals(cvode_mem, &nje); check_retval(&retval, "CVodeGetNumJacEvals", 1); retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS); check_retval(&retval, "CVodeGetNumLinRhsEvals", 1); printf("\nFinal Statistics:\n"); printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n", nst, nfe, nsetups, nfeLS, nje); printf("nni = %-6ld ncfn = %-6ld netf = %ld\n", nni, ncfn, netf); return; } /* Check function return value... 
opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns an integer value so check if retval < 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, const char *funcname, int opt) { int *retval; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && returnvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if retval < 0 */ else if (opt == 1) { retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && returnvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
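The IJth macro used throughout the example stores the mesh with the y index j varying fastest, so y-neighbors are 1 apart in memory and x-neighbors are MY apart, which is exactly why the band matrix is created as SUNBandMatrix(NEQ, MY, MY). A small standalone check of that layout, independent of SUNDIALS and with made-up mesh sizes, makes the stride pattern explicit:

#include <stdio.h>

#define MX 4
#define MY 3
/* Same layout as above: y index j varies fastest, so u(i,j) sits at
   offset (j-1) + (i-1)*MY.  Neighbors in y are 1 apart in memory,
   neighbors in x are MY apart. */
#define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY])

int main(void) {
  double u[MX*MY];
  for (int i = 1; i <= MX; i++)
    for (int j = 1; j <= MY; j++)
      IJth(u, i, j) = 10.0*i + j;          /* encode (i,j) in the value */
  /* u(2,3) lives at offset (3-1) + (2-1)*MY = 5 */
  printf("u(2,3) = %g at offset %d\n", IJth(u, 2, 3),
         (int)(&IJth(u, 2, 3) - u));       /* prints: u(2,3) = 23 at offset 5 */
  return 0;
}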
tinyexr.h
/* Copyright (c) 2014 - 2017, Syoyo Fujita All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... // #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif // Use embedded miniz or not to decode ZIP format pixel. Linking with zlib // required if this flas is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ comporession when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-5) #define TINYEXR_ERROR_CANT_OPEN_FILE (-6) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7) #define TINYEXR_ERROR_INVALID_HEADER (-8) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 int tiled; // tile format image int long_name; // long name attribute int non_image; // deep image(EXR 2.0) int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height int a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; int data_window[4]; int display_window[4]; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). 
int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; int non_image; int multipart; unsigned int header_len; // Custom attributes(exludes required attributes(e.g. `channels`, // `compression`, etc) int num_custom_attributes; EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES]; EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Meomory|File), then users // can edit it(only valid for HALF pixel type // channel) } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { to be removed. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x hight` extern int SaveEXR(const float *data, int width, int height, int components, const char *filename); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Free's internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Free's internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if succes. // Returns negative value and may set error string in `err` when there's an // error extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // `out_rgba` must have enough memory(at least sizeof(float) x 4(RGBA) x width x // hight) // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #endif // __cplusplus > 199711L #ifdef _OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else #include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). 
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. 
     Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug
     report.
     5/28/11 v1.11 - Added statement from unlicense.org
     5/27/11 v1.10 - Substantial compressor optimizations:
     - Level 1 is now ~4x faster than before. The L1 compressor's throughput
       now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
       depending on the type of data, and x64 vs. x86).
     - Improved baseline L2-L9 compression perf. Also, greatly improved
       compression perf. issues on some file types.
     - Refactored the compression code for better readability and
       maintainability.
     - Added level 10 compression level (L10 has slightly better ratio than
       level 9, but could have a potentially large drop in throughput on some
       files).
     5/15/11 v1.09 - Initial stable release.

   * Low-level Deflate/Inflate implementation notes:

     Compression: Use the "tdefl" API's. The compressor supports raw, static,
     and dynamic blocks, lazy or greedy parsing, match length filtering,
     RLE-only, and Huffman-only streams. It performs and compresses
     approximately as well as zlib.

     Decompression: Use the "tinfl" API's. The entire decompressor is
     implemented as a single function coroutine: see tinfl_decompress(). It
     supports decompression into a 32KB (or larger power of 2) wrapping
     buffer, or into a memory block large enough to hold the entire file.

     The low-level tdefl/tinfl API's do not make any use of dynamic memory
     allocation.

   * zlib-style API notes:

     miniz.c implements a fairly large subset of zlib. There's enough
     functionality present for it to be a drop-in zlib replacement in many
     apps:
        The z_stream struct, optional memory allocation callbacks
        deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
        inflateInit/inflateInit2/inflate/inflateEnd
        compress, compress2, compressBound, uncompress
        CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
        routines.
        Supports raw deflate streams or standard zlib streams with adler-32
        checking.

     Limitations:
        The callback API's are not implemented yet. No support for gzip
        headers or zlib static dictionaries. I've tried to closely emulate
        zlib's various flavors of stream flushing and return status codes, but
        there are no guarantees that miniz.c pulls this off perfectly.

   * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
     originally written by Alex Evans. Supports 1-4 bytes/pixel images.

   * ZIP archive API notes:

     The ZIP archive API's were designed with simplicity and efficiency in
     mind, with just enough abstraction to get the job done with minimal fuss.
     There are simple API's to retrieve file information, read files from
     existing archives, create new archives, append new files to existing
     archives, or clone archive data from one archive to another. It supports
     archives located in memory or the heap, on disk (using stdio.h), or you
     can specify custom file read/write callbacks.

     - Archive reading: Just call this function to read a single file from a
       disk archive:

        void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
        const char *pArchive_name, size_t *pSize, mz_uint zip_flags);

       For more complex cases, use the "mz_zip_reader" functions. Upon opening
       an archive, the entire central directory is located and read as-is into
       memory, and subsequent file access only occurs when reading individual
       files.
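       An editorial sketch (not part of the original miniz notes) of that
       reader path, assuming an archive "test.zip" containing "file.txt"; note
       that this embedded copy compiles the archive APIs out via
       MINIZ_NO_ARCHIVE_APIS below:

          mz_zip_archive zip;
          memset(&zip, 0, sizeof(zip));  // the struct must start zeroed
          if (mz_zip_reader_init_file(&zip, "test.zip", 0)) {
            size_t size = 0;
            void *p =
                mz_zip_reader_extract_file_to_heap(&zip, "file.txt", &size, 0);
            if (p) {
              // p points to the decompressed contents, size bytes long
              mz_free(p);
            }
            mz_zip_reader_end(&zip);  // frees all reader allocations
          }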
     - Archive file scanning: The simple way is to use this function to scan a
       loaded archive for a specific file:

        int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
        const char *pComment, mz_uint flags);

       The locate operation can optionally check file comments too, which (as
       one example) can be used to identify multiple versions of the same file
       in an archive. This function uses a simple linear search through the
       central directory, so it's not very fast.

       Alternately, you can iterate through all the files in an archive (using
       mz_zip_reader_get_num_files()) and retrieve detailed info on each file
       by calling mz_zip_reader_file_stat().

     - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
       immediately writes compressed file data to disk and builds an exact
       image of the central directory in memory. The central directory image
       is written all at once at the end of the archive file when the archive
       is finalized.

       The archive writer can optionally align each file's local header and
       file data to any power of 2 alignment, which can be useful when the
       archive will be read from optical media. Also, the writer supports
       placing arbitrary data blobs at the very beginning of ZIP archives.
       Archives written using either feature are still readable by any ZIP
       tool.

     - Archive appending: The simple way to add a single file to an archive is
       to call this function:

        mz_bool mz_zip_add_mem_to_archive_file_in_place(const char
        *pZip_filename, const char *pArchive_name, const void *pBuf, size_t
        buf_size, const void *pComment, mz_uint16 comment_size, mz_uint
        level_and_flags);

       The archive will be created if it doesn't already exist, otherwise it'll
       be appended to. Note the appending is done in-place and is not an
       atomic operation, so if something goes wrong during the operation it's
       possible the archive could be left without a central directory
       (although the local file headers and file data will be fine, so the
       archive will be recoverable).

       For more complex archive modification scenarios:
       1. The safest way is to use a mz_zip_reader to read the existing
          archive, cloning only those bits you want to preserve into a new
          archive using the mz_zip_writer_add_from_zip_reader() function
          (which copies the compressed file data as-is). When you're done,
          delete the old archive and rename the newly written archive. This is
          safe but requires a bunch of temporary disk space or heap memory.
       2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer
          using mz_zip_writer_init_from_reader(), append new files as needed,
          then finalize the archive which will write an updated central
          directory to the original archive. (This is basically what
          mz_zip_add_mem_to_archive_file_in_place() does.) There's a
          possibility that the archive's central directory could be lost with
          this method if anything goes wrong, though.

     - ZIP archive support limitations: No zip64 or spanning support.
       Extraction functions can only handle unencrypted, stored or deflated
       files. Requires streams capable of seeking.

   * This is a header file library, like stb_image.c. To get only a header
     file, either cut and paste the below header, or create miniz.h, #define
     MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.

   * Important: For best performance,
     be sure to customize the below macros for your target platform:
     #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
     #define MINIZ_LITTLE_ENDIAN 1
     #define MINIZ_HAS_64BIT_REGISTERS 1

   * On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1"
     before including miniz.c to ensure miniz uses the 64-bit variants:
     fopen64(), stat64(), etc. Otherwise you won't be able to process large
     files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/

#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be
// able to get the current time, or get/set file times, and the C run-time
// funcs that get/set times won't be called.
// The current downside is the times written to your archives will be from
// 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc callbacks to the zlib and archive API's, and a few
// stand-alone helper API's which don't provide custom user functions (such as
// tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't
// work.
//#define MINIZ_NO_MALLOC

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) ||   \
    defined(__i386) || defined(__i486__) || defined(__i486) ||    \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif

#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
  0  // disabled to suppress compiler warnings
#endif

#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) ||  \
    defined(_LP64) || defined(__LP64__) || defined(__ia64__) ||    \
    defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API Definitions.

// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;

// mz_free() internally uses the MZ_FREE() macro (which by default calls
// free() unless you've modified the MZ_FREE macro) to release a block
// allocated from the heap.
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,
  MZ_BEST_SPEED = 1,
  MZ_BEST_COMPRESSION = 9,
  MZ_UBER_COMPRESSION = 10,
  MZ_DEFAULT_LEVEL = 6,
  MZ_DEFAULT_COMPRESSION = -1
};

// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15

struct mz_internal_state;

// Compression/decompression stream struct.
typedef struct mz_stream_s {
  const unsigned char *next_in;  // pointer to next byte to read
  unsigned int avail_in;         // number of bytes available at next_in
  mz_ulong total_in;             // total number of bytes consumed so far

  unsigned char *next_out;  // pointer to next byte to write
  unsigned int avail_out;   // number of bytes that can be written to next_out
  mz_ulong total_out;       // total number of bytes produced so far

  char *msg;  // error msg (unused)
  struct mz_internal_state *state;  // internal state, allocated by zalloc/zfree

  mz_alloc_func zalloc;  // optional heap allocation function (defaults to malloc)
  mz_free_func zfree;    // optional heap free function (defaults to free)
  void *opaque;          // heap alloc function user pointer

  int data_type;      // data_type (unused)
  mz_ulong adler;     // adler32 of the source or uncompressed data
  mz_ulong reserved;  // not used
} mz_stream;

typedef mz_stream *mz_streamp;

// Returns the version string of miniz.c.
const char *mz_version(void);

// mz_deflateInit() initializes a compressor with default options:
// Parameters:
//  pStream must point to an initialized mz_stream struct.
//  level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
//  level 1 enables a specially optimized compression function that's been
//  optimized purely for performance, not ratio.
//  (This special func. is currently only enabled when
//  MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
//  MZ_PARAM_ERROR if the input parameters are bogus.
//  MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);

// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
//   method must be MZ_DEFLATED
//   window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream
//   with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw
//   deflate/no header or footer)
//   mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy);

// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);

// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must
//   initialize/update the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH,
//   or MZ_FINISH.
// Return values:
//   MZ_OK on success (when flushing, or if more input is needed but not
//   available, and/or there's more output to be written but the output buffer
//   is full).
//   MZ_STREAM_END if all input has been consumed and all output bytes have
//   been written. Don't call mz_deflate() on the stream anymore.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input and/or
//   output buffers are empty. (Fill up the input buffer or free up some
//   output space and try again.)
int mz_deflate(mz_streamp pStream, int flush);

// mz_deflateEnd() deinitializes a compressor:
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);

// mz_deflateBound() returns a (very) conservative upper bound on the amount
// of data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);

// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level);

// mz_compressBound() returns a (very) conservative upper bound on the amount
// of data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);

// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
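// Editor's note: a minimal single-call round trip using the helpers declared
// above, shown as a hedged sketch (the `src`/`src_len` input buffer is an
// assumed caller-provided name, not part of miniz):
//
//   mz_ulong cmp_len = mz_compressBound(src_len);
//   unsigned char *cmp = (unsigned char *)malloc(cmp_len);
//   if (cmp && mz_compress(cmp, &cmp_len, src, src_len) == MZ_OK) {
//     // cmp_len now holds the actual compressed size.
//     mz_ulong out_len = src_len;  // caller must know (or bound) the original size
//     unsigned char *out = (unsigned char *)malloc(out_len);
//     if (out && mz_uncompress(out, &out_len, cmp, cmp_len) == MZ_OK) {
//       // out[0..out_len) now equals src[0..src_len)
//     }
//     free(out);
//   }
//   free(cmp);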
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. 
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

// ------------------- ZIP archive reading/writing

#ifndef MINIZ_NO_ARCHIVE_APIS

enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};

typedef struct {
  mz_uint32 m_file_index;
  mz_uint32 m_central_dir_ofs;
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;
  mz_uint64 m_comp_size;
  mz_uint64 m_uncomp_size;
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;

  mz_uint m_file_offset_alignment;

  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;

  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;

  mz_zip_internal_state *m_pState;
} mz_zip_archive;

typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;

// ZIP archive reading

// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif

// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);

// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);

// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size
// is 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input
// archive file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless
// you've overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags,
// or just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags,
                                 mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags,
// or just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment
// fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file
// if mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before
// ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);

// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags,
// or just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);

#endif  // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif  // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer.
If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. 
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block
// in memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
//  pImage, w, h, and num_chans describe the image to compress. num_chans may
//  be 1, 2, 3, or 4.
//  The image pitch in bytes per scanline will be w*num_chans. The leftmost
//  pixel on the top scanline is stored first in memory.
//  level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
//  MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a decent default.
//  If flip is true, the image will be flipped on the Y axis (useful for
//  OpenGL apps).
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pLen_out will be set to the size of the PNG image file.
//  The caller must mz_free() the returned heap block (which will typically be
//  larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);

// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above
// helper functions aren't flexible enough. The low-level functions don't make
// any heap allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos,
      m_bits_in, m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer()
// API for compression.
// If pPut_buf_func is NULL, the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may
// be much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif  // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif  // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only
// want the header, define MINIZ_HEADER_FILE_ONLY.)
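// Editor's note: a hedged sketch (not from the original sources) of driving
// the streaming mz_deflate() API declared above in a single pass. The
// `src`/`src_len` and `dst`/`dst_cap` buffers are assumed caller-provided
// names; dst_cap should be at least mz_deflateBound(&s, src_len) bytes:
//
//   mz_stream s;
//   memset(&s, 0, sizeof(s));  // zero the stream so defaults kick in
//   if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) == MZ_OK) {
//     s.next_in = src;   s.avail_in = (unsigned int)src_len;
//     s.next_out = dst;  s.avail_out = (unsigned int)dst_cap;
//     int rc = mz_deflate(&s, MZ_FINISH);  // MZ_STREAM_END on success
//     mz_ulong written = s.total_out;      // compressed size if rc == MZ_STREAM_END
//     mz_deflateEnd(&s);
//   }
//
// For incremental use, call mz_deflate() with MZ_NO_FLUSH in a loop, topping
// up next_in/avail_in and draining next_out/avail_out between calls.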
#ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } //static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
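// A note on the table layout the macros below rely on: each non-negative
// m_look_up[] entry packs a short Huffman code as (code_length << 9) | symbol,
// so the length is recovered via temp >> 9 and the symbol via temp & 511.
// Negative entries are bitwise-complemented indices into the m_tree[] binary
// tree, which handles codes longer than TINFL_FAST_LOOKUP_BITS one bit at a
// time.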
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
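// tdefl_huffman_enforce_max_code_size() below first folds the counts of all
// codes longer than max_code_size into the max_code_size bucket, then restores
// the Kraft equality sum(2^(max_code_size - len_i)) == 2^max_code_size by
// repeatedly trading one max-length code for a lengthened shorter one.
// Worked example (illustrative, not from the original comments): with
// max_code_size = 3, clamped lengths {1,2,3,3,3} give a Kraft total of
// 4+2+1+1+1 = 9 > 8; one iteration rewrites the set to {1,3,3,3,3}, whose
// total is 4+1+1+1+1 = 8, a valid prefix code.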
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
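  // These are the fixed code lengths mandated by RFC 1951 (section 3.2.6):
  // literal/length symbols 0-143 get 8 bits, 144-255 get 9, 256-279 get 7,
  // 280-287 get 8, and all 32 distance symbols get 5 bits, so a static block
  // never has to transmit code-length tables.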
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
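  // (The recovery path below rewinds m_pOutput_buf and the bit buffer to the
  // state saved above, then re-emits the block with the static tables; a
  // static block's entire header is its 2-bit type code, so it needs far less
  // space than the failed dynamic attempt's code-length tables.)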
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
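  // (Concretely: it hashes 3-byte trigrams through a single-probe level-1
  // hash table instead of walking full hash chains, emits a raw literal
  // whenever that one probe misses, and defers all entropy coding to
  // tdefl_flush_block() once the LZ code buffer nears capacity.)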
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded =
      succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                    TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning( \ disable : 4267) // 'argument': conversion from '__int64' to 'int', // possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
                           0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                           0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,
                           0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,
                           8,    chans[num_chans], 0, 0, 0, 0, 0, 0, 0,
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,
                           0x49, 0x44, 0x41, 0x54};
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}

void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans,
                                              size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}

// ------------------- .ZIP archive reading

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>

#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif
defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, 
mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
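  // (Illustrative, not a guarantee: building with something like
  //    cc -D_LARGEFILE64_SOURCE=1 -c miniz.c
  // selects the fopen64()/stat64() MZ_FILE_STAT mapping defined earlier on
  // glibc; the exact flags required are libc- and platform-specific.)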
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
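// The sorted index is what lets mz_zip_reader_locate_file() binary search
// instead of linearly scanning every record. Illustrative usage sketch
// (hypothetical file name, assumes an already-initialized reader `zip`; not
// part of the library):
//
//   int idx = mz_zip_reader_locate_file(&zip, "dir/file.txt", NULL, 0);
//   if (idx >= 0) {
//     size_t size;
//     void *p = mz_zip_reader_extract_to_heap(&zip, (mz_uint)idx, &size, 0);
//     /* use p[0..size), then release it via the archive's allocator: */
//     if (p) zip.m_pFree(zip.m_pAlloc_opaque, p);
//   }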
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
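  // (The fixed part of the EOCD record is
  // MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE (22) bytes and its trailing comment
  // is at most 0xFFFF bytes, which is why the backwards scan above never has
  // to look more than 0xFFFF + 22 bytes from the end of the archive.)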
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
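    // (zip64 entries are detected via the 0xFFFFFFFF sentinel sizes tested
    // below and rejected rather than silently misparsed.)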
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
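  // (0x10 is the DOS FILE_ATTRIBUTE_DIRECTORY bit; the assumption, noted
  // above, is that the writer stored DOS-style attributes in the low 16 bits
  // of the external attributes field, as most tools do.)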
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
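    // (No inflater is needed on this path: bytes are streamed to the callback
    // as-is, and the CRC is only tracked when the caller wants the stored
    // payload rather than the raw compressed data.)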
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
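    // (Both checks matter: a deflate stream can terminate cleanly after
    // producing fewer bytes than the central directory promised, and the CRC
    // catches corruption that a pure length check cannot.)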
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
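    // (Classic bit trick: x & (x - 1) clears the lowest set bit, so the
    // expression below is nonzero exactly when x has more than one bit set,
    // i.e. when x is not a power of two.)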
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // max size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;

  pState = pZip->m_pState;

  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;

  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;

  return MZ_TRUE;
}

mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}

typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs;
  mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
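    // (Only m_central_dir needs this rollback: m_central_dir_offsets is
    // pushed last in the short-circuited chain above, so if an earlier push
    // failed it was never touched, and if its own push failed nothing was
    // appended to it.)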
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
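    // (Sketch of the append path: the reader pass below locates the existing
    // central directory, then mz_zip_writer_init_from_reader() switches the
    // archive to write mode with the write cursor at the old central
    // directory's offset; new file data overwrites the stale central
    // directory, and mz_zip_writer_finalize_archive() emits a fresh one at
    // the new end of the archive.)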
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif } #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
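// (Preprocessor note: identifiers that are undefined evaluate to 0 inside
// #if, so on compilers without __BYTE_ORDER__ the comparison degenerates to
// 0 == 0 and little-endian is assumed; the __sparcv9 branch above exists to
// keep big-endian SPARC out of this path.)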
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static const int kEXRVersionSize = 8; static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 
0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else    // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;

  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RANDOM_Y      2
//
// #define IMF_NO_COMPRESSION    0
// #define IMF_RLE_COMPRESSION   1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION   3
// #define IMF_PIZ_COMPRESSION   4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION   6
// #define IMF_B44A_COMPRESSION  7

static const char *ReadString(std::string *s, const char *ptr) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((*q) != 0) q++;

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data,
                          size_t *marker_size, const char *marker,
                          size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

typedef struct {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static void ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; p = ReadString(&info.name, p); memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. 
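  // (The predictor stores each byte as the delta from its predecessor,
  // rebased so the unsigned-char cast keeps it in range: d = t[0] - p +
  // (128 + 256), taken modulo 256. Smooth scanlines turn into runs of
  // values near 128, which deflate packs well. E.g. bytes {10, 12, 11}
  // become {10, 130, 127}; DecompressZip inverts this via
  // t[0] = t[-1] + t[0] - 128.)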
// { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); assert(ret == miniz::MZ_OK); (void)ret; #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); assert(ret == Z_OK); (void)ret; #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. // static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressable run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). 
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      if (0 > (maxLength -= count))
        return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1))
        return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

static void DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  assert(ret == static_cast<int>(uncompressed_size));
  (void)ret;

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
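  // (Inverse of the split done in CompressRle: the first (n + 1) / 2 bytes
  // of tmpBuf hold the even-indexed bytes of the original stream, the rest
  // hold the odd-indexed bytes, and the loop below re-interleaves them,
  // e.g. planes {a, c, e} and {b, d} zip back into {a, b, c, d, e}.)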
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
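// A worked example of the 14-bit pair above: wenc14(300, 290) stores the
// truncated average l = (300 + 290) >> 1 = 295 and the difference h = 10;
// wdec14(295, 10) recovers a = 295 + (10 & 1) + (10 >> 1) = 300 and
// b = a - h = 290, so the transform is exactly invertible.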
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0)
    m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ?
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
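// (How the constants below fit together: symbols are 16-bit values, so the
// encoding table has one entry per value plus one run-length pseudo-symbol,
// giving HUF_ENCSIZE == 65537. Decoding is two-tier: codes of length up to
// HUF_DECBITS resolve with a single 14-bit table lookup, while longer codes
// walk the lit/p list in HufDec.)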
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // int hlink[HUF_ENCSIZE]; long long *fHeap[HUF_ENCSIZE]; *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has the first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); long long scode[HUF_ENCSIZE]; memset(scode, 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists.
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode); memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
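// (Added note: hufBuildDecTable() below relies on len == 0 and p == NULL to detect unused entries, so the table must be cleared before it is populated.)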
// static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller) // decoding table [HUF_DECSIZE] { for (int i = 0; i < HUF_DECSIZE; i++) { hdecod[i].len = 0; hdecod[i].lit = 0; hdecod[i].p = NULL; } // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE); } // // Build a decoding hash table based on the encoding table hcode: // - short codes (<= HUF_DECBITS) are resolved with a single table access; // - long code entry allocations are not optimized, because long codes are // infrequent; // - decoding tables are used by hufDecode(); // static bool hufBuildDecTable(const long long *hcode, // i : encoding table int im, // i : min index in hcode int iM, // i : max index in hcode HufDec *hdecod) // o: (allocated by caller) // decoding table [HUF_DECSIZE] { // // Init hashtable & loop on all codes. // Assumes that hufClearDecTable(hdecod) has already been called. // for (; im <= iM; im++) { long long c = hufCode(hcode[im]); int l = hufLength(hcode[im]); if (c >> l) { // // Error: c is supposed to be an l-bit code, // but c contains a value that is greater // than the largest l-bit number. // // invalidTableEntry(); return false; } if (l > HUF_DECBITS) { // // Long code: add a secondary entry // HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) { // // Error: a short code has already // been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { int *p = pl->p; pl->p = new int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCode. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number.
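// For example (illustrative numbers, added commentary): with a 10-bit code for sCode and a 12-bit runCode, a run of 50 repeats costs 10 + 12 + 8 = 30 bits in run form versus 10 * 50 = 500 bits written out explicitly, so the run form is chosen.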
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in 16-bit words) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #define getCode(po, rlc, c, lc, in, out, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in 16-bit words) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; unsigned short *oe = out + no; const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; getCode(pl.p[j], rlc, c, lc, in, out, oe); break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(long long freq[HUF_ENCSIZE], const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
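// Tally the occurrences of each 16-bit value (added comment); the resulting histogram drives hufBuildEncTable().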
for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; long long freq[HUF_ENCSIZE]; countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq, &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq, im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq, raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, unsigned short raw[], int nRaw) { if (nCompressed == 0) { if (nRaw != 0) return false; return true; // empty input and empty expected output: nothing to decode } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0))) { return false; } { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) { hufFreeDecTable(&hdec.at(0)); return false; } if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw)) { hufFreeDecTable(&hdec.at(0)); return false; } } // catch (...) //{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e.
number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap, minNonZero, maxNonZero); unsigned short lut[USHORT_RANGE]; unsigned short maxValue = forwardLutFromBitmap(bitmap, lut); applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. 
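// Added sketch of the resulting PIZ block layout: [ushort minNonZero][ushort maxNonZero][bitmap slice (maxNonZero - minNonZero + 1 bytes, omitted when the range is empty)][int length][Huffman-coded data (length bytes)]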
char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap, 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } unsigned short lut[USHORT_RANGE]; memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap, lut); // // Huffman decoding // int length; length = *(reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; 
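// Illustrative usage sketch (added commentary, not part of the original code): the default-constructed parameter block selects fixed-rate mode at 2.0 bits/value; a caller could override it before compressing, e.g. // ZFPCompressionParam zfp_param; // rate mode, rate = 2.0 // zfp_param.type = TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION; // zfp_param.precision = 16; // hypothetical choice: 16 uncompressed bits per value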
bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->precision = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = static_cast<size_t>(dst_width) * static_cast<size_t>(dst_num_lines) * static_cast<size_t>(num_channels) * sizeof(float); if (uncompressed_size == src_size) { // Data is not compressed (Issue 40). memcpy(dst, src, src_size); return true; } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels.
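// (Added note: CompressZfp() mirrors DecompressZfp() above -- per channel, the image is processed as 4x4 blocks with the same rate/precision/accuracy selection.)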
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp)); zfp_stream_close(zfp); stream_close(stream); return true; } #endif // // ----------------------------------------------------------------- // static void DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ...
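// Added note: within the decompressed block, channel c of scanline v therefore starts at byte offset v * pixel_data_size * width + channel_offset_list[c] * width, which is the indexing used below.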
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is not enabled in this build"); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size.
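// (Added note: ZIPS compresses one scanline per chunk while ZIP compresses blocks of 16 scanlines; num_lines reflects this at the call site.)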
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += 
(static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = 
line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned short)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = 
half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + c * static_cast<size_t>(width) * sizeof(float)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } static void DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. 
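// Added note: edge tiles may be narrower/shorter than tile_size_x/y, so the decoded region size (*width, *height) is passed separately from the tile_size_x row stride used for the destination image.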
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { assert(0); } } } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
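// (Added note: a multipart file terminates its sequence of headers with a single null byte in place of the next attribute name.)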
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(info->channels, data); if (info->channels.size() < 1) { if (err) { (*err) = "# of channels is zero."; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], 
&data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } else if (attr_name.compare("displayWindow") == 0) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } else if (attr_name.compare("lineOrder") == 0) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } else if (attr_name.compare("pixelAspectRatio") == 0) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } else if (attr_name.compare("screenWindowCenter") == 0) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } else if (attr_name.compare("screenWindowWidth") == 0) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } else if (attr_name.compare("chunkCount") == 0) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } else { // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) { EXRAttribute attrib; strncpy(attrib.name, attr_name.c_str(), 255); attrib.name[255] = '\0'; strncpy(attrib.type, attr_type.c_str(), 255); attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." 
<< std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); // manually add '\0' for safety. exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } assert(info.attributes.size() < TINYEXR_MAX_ATTRIBUTES); exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy poiner exr_header->custom_attributes[i].value = info.attributes[i].value; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { 
num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels); if (exr_header->tiled) { size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( malloc(sizeof(EXRTile) * static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile. exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } assert(tile_coordinates[2] == 0); assert(tile_coordinates[3] == 0); int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); assert(data_len >= 4); // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 
1)); int num_lines = end_line_no - line_no; assert(num_lines > 0); // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y line_no -= exr_header->data_window[1]; tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); } // omp parallel } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if (offset >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; // Read offset tables. size_t num_blocks; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { if (err) { (*err) = "Invalid offset value."; } return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { if (err) { (*err) = "Cannot reconstruct lineOffset table."; } return TINYEXR_ERROR_INVALID_DATA; } } } return DecodeChunk(exr_image, exr_header, offsets, head); } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { if (err) { (*err) = "Invalid argument.\n"; } return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } if (exr_version.multipart || exr_version.non_image) { if (err) { (*err) = "Loading multipart or DeepImage is not supported yet.\n"; } return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) { // Alpha channel only. 
(*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { if (err) { (*err) = "B channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { if (err) { (*err) = "Invalid argument.\n"; } // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { #ifdef _WIN32 (*err) = _strdup(err_str.c_str()); // May leak #else (*err) = strdup(err_str.c_str()); // May leak #endif } } ConvertHeader(exr_header, info); // transfoer `tiled` from version. 
exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { if (err) { (*err) = "Invalid argument.\n"; } return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { if (err) { (*err) = "B channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } // Assume `out_rgba` have enough memory allocated. for (int i = 0; i < exr_image.width * exr_image.height; i++) { out_rgba[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; out_rgba[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; out_rgba[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA > 0) { out_rgba[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { out_rgba[4 * i + 3] = 1.0; } } return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { if (err) { (*err) = "EXRHeader is not initialized."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
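  // (Editor's note, added for clarity -- not in the original source.)
  // Layout assumed at this point, matching what SaveEXRImageToMemory()
  // below emits: 4-byte magic number + 4-byte version field (the 8 bytes
  // skipped above), then the attribute header of exr_header->header_len
  // bytes, then the chunk offset table and chunk data that
  // DecodeEXRImage() consumes.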
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { if (err) { (*err) = "Pixel type must be FLOAT for ZFP compression."; } return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int 
*>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. 
zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = f32.f; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = h16.u; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); 
line_ptr[x] = val; } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 1024 + static_cast<unsigned int>( 1.2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return 0; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { if (err) { (*err) = "Cannot write a file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if 
(deep_image == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); if (err) { (*err) = "File size is zero."; } return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { if (err) { (*err) = "Invalid magic number."; } return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { if (err) { (*err) = "Unsupported version or scanline."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Unsupported compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int tinyexr::ReadChannelInfo(channels, data); num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { if (err) { (*err) = "Invalid channels format."; } return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); 
assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { if (err) { (*err) = "Unsupported format."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
{ unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize)); assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize)); assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui = *reinterpret_cast<unsigned int *>( &sample_data.at(data_offset + x * sizeof(int))); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; f16.u = *reinterpret_cast<unsigned short *>( &sample_data.at(data_offset + x * sizeof(short))); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f = *reinterpret_cast<float *>( &sample_data.at(data_offset + x * sizeof(float))); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } 
exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err) { #ifdef _WIN32 (*err) = _strdup(err_str.c_str()); // may leak #else (*err) = strdup(err_str.c_str()); // may leak #endif } return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { if (err) { (*err) = "`chunkCount' attribute is not found in the header."; } return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. 
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { if (err) { (*err) = "EXRHeader is not initialized."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { if (err) { (*err) = "Invalid offset size."; } return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { assert(0); return TINYEXR_ERROR_INVALID_DATA; } } int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory); if (ret != TINYEXR_SUCCESS) { return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) {
    strncpy(header.channels[0].name, "A", 255);
    header.channels[0].name[strlen("A")] = '\0';
    strncpy(header.channels[1].name, "B", 255);
    header.channels[1].name[strlen("B")] = '\0';
    strncpy(header.channels[2].name, "G", 255);
    header.channels[2].name[strlen("G")] = '\0';
    strncpy(header.channels[3].name, "R", 255);
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
    strncpy(header.channels[0].name, "B", 255);
    header.channels[0].name[strlen("B")] = '\0';
    strncpy(header.channels[1].name, "G", 255);
    header.channels[1].name[strlen("G")] = '\0';
    strncpy(header.channels[2].name, "R", 255);
    header.channels[2].name[strlen("R")] = '\0';
  } else {
    strncpy(header.channels[0].name, "A", 255);
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image
    header.requested_pixel_types[i] =
        TINYEXR_PIXELTYPE_HALF;  // pixel type of output image to be stored in
                                 // .EXR
  }

  const char *err = NULL;
  int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);

  // Free the header allocations whether or not the save succeeded, so an
  // error return from SaveEXRImageToFile does not leak them.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif  // TINYEXR_IMPLEMENTATION_DEIFNED
#endif  // TINYEXR_IMPLEMENTATION
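// --- Illustrative usage sketch (hypothetical driver, not part of tinyexr) ---
// Exercises two of the entry points defined above: SaveEXR() for a quick
// float -> half EXR dump, and ParseEXRVersionFromFile() to read back the
// version/flag bits decoded at the top of this section.
#if 0  // example only
static int example_roundtrip(void) {
  // SaveEXR() rejects images smaller than 16x16, so use the minimum size.
  static float rgb[16 * 16 * 3];  // zero-initialized RGB pixels
  int ret = SaveEXR(rgb, 16, 16, 3, "out.exr");
  if (ret != TINYEXR_SUCCESS) return ret;

  EXRVersion version;
  ret = ParseEXRVersionFromFile(&version, "out.exr");
  if (ret != TINYEXR_SUCCESS) return ret;
  // version.tiled / version.long_name / version.non_image / version.multipart
  // mirror bits 9-12 of the version word parsed in ParseEXRVersionFromMemory.
  return TINYEXR_SUCCESS;
}
#endif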
inner_only_1.no.c
int main(int argc,char *argv[])
{
  int i;
  int j;
  double a[20][20];
//  memset(a,0,(sizeof(a)));
  /* Only the inner (j) loop is parallelized. The outer (i) loop carries an
     anti-dependence: iteration i reads a[i + 1][j] before iteration i + 1
     overwrites it, so it must stay sequential. The j iterations touch
     disjoint elements, so the inner loop is race-free. */
  for (i = 0; i <= 18; i += 1) {
#pragma omp parallel for
    for (j = 0; j <= 19; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }
  return 0;
}
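/* For contrast (hypothetical variant, not part of the test file above):
   moving the pragma to the OUTER loop would introduce a data race, since
   thread i may read a[i + 1][j] while thread i + 1 is writing it:

       #pragma omp parallel for private(j)   // WRONG: races on a[i + 1][j]
       for (i = 0; i <= 18; i += 1)
         for (j = 0; j <= 19; j += 1)
           a[i][j] += a[i + 1][j];
*/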
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef 
float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); if (from.type_flag_ == mshadow::kBool || to.type_flag_ == mshadow::kBool) { CHECK_EQ(from.type_flag_, to.type_flag_) << "Only supports copying between boolean ndarrays."; mshadow::Copy(to.FlatTo1D<xpu, bool>(s), from.FlatTo1D<xpu, bool>(s), s); return; } MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. 
-- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. 
* When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? 
N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
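// --- Illustrative usage sketch (an assumption, not part of mxnet_op.h) ---
// How the Kernel<OP, cpu>::Launch() machinery above is typically driven:
// define a struct whose static Map(index_t i, ...) handles one element, then
// launch it over N elements. set_zero is the shortcut defined in this header;
// the axpy op and axpy_example() driver below are hypothetical.
#if 0  // example only
struct axpy {
  // Computes y[i] += a * x[i]; Launch() calls Map() once per index and
  // splits the index range across OpenMP threads when available.
  template <typename DType>
  MSHADOW_XINLINE static void Map(mshadow::index_t i, DType *y,
                                  const DType *x, const DType a) {
    y[i] += a * x[i];
  }
};

inline void axpy_example(mshadow::Stream<mshadow::cpu> *s,
                         float *y, const float *x, size_t n) {
  using namespace mxnet::op::mxnet_op;
  Kernel<set_zero, mshadow::cpu>::Launch(s, n, y);       // y[i] = 0
  Kernel<axpy, mshadow::cpu>::Launch(s, n, y, x, 2.0f);  // y[i] += 2 * x[i]
}
#endif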
GB_emult_01_phase0.c
//------------------------------------------------------------------------------ // GB_emult_01_phase0: find vectors of C to compute for C=A.*B or C<M>=A.*B //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The eWise multiply of two matrices, C=A.*B, C<M>=A.*B, or C<!M>=A.*B starts // with this phase, which determines which vectors of C need to be computed. // On input, A and B are the two matrices being ewise multiplied, and M is the // optional mask matrix. If present, it is not complemented. // The M, A, and B matrices are sparse or hypersparse. C will be sparse // (if Ch is returned NULL) or hypersparse (if Ch is returned non-NULL). // Ch: the vectors to compute in C. Not allocated, but equal to either // A->h, B->h, or M->h, or NULL if C is not hypersparse. // C_to_A: if A is hypersparse, and Ch is not A->h, then C_to_A [k] = kA // if the kth vector j = Ch [k] is equal to Ah [kA]. If j does not appear // in A, then C_to_A [k] = -1. Otherwise, C_to_A is returned as NULL. // C is always hypersparse in this case. // C_to_B: if B is hypersparse, and Ch is not B->h, then C_to_B [k] = kB // if the kth vector j = Ch [k] is equal to Bh [kB]. If j does not appear // in B, then C_to_B [k] = -1. Otherwise, C_to_B is returned as NULL. // C is always hypersparse in this case. // C_to_M: if M is hypersparse, and Ch is not M->h, then C_to_M [k] = kM // if the kth vector j = GBH (Ch, k) is equal to Mh [kM]. // If j does not appear in M, then C_to_M [k] = -1. Otherwise, C_to_M is // returned as NULL. C is always hypersparse in this case. 
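// Example of the mapping convention above (illustrative): suppose C is
// hypersparse with Ch = [0 2 5], and A is hypersparse with Ah = [2 3 5].
// Then C_to_A = [-1 0 2]: vector j = 0 does not appear in A, j = 2 is
// Ah [0], and j = 5 is Ah [2]. The same convention applies to C_to_B and
// C_to_M.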
// FUTURE:: exploit A==M, B==M, and A==B aliases #include "GB_emult.h" GrB_Info GB_emult_01_phase0 // find vectors in C for C=A.*B or C<M>=A.*B ( int64_t *p_Cnvec, // # of vectors to compute in C const int64_t *restrict *Ch_handle, // Ch is M->h, A->h, B->h, or NULL size_t *Ch_size_handle, int64_t *restrict *C_to_M_handle, // C_to_M: size Cnvec, or NULL size_t *C_to_M_size_handle, int64_t *restrict *C_to_A_handle, // C_to_A: size Cnvec, or NULL size_t *C_to_A_size_handle, int64_t *restrict *C_to_B_handle, // C_to_B: size Cnvec, or NULL size_t *C_to_B_size_handle, int *C_sparsity, // sparsity structure of C // original input: const GrB_Matrix M, // optional mask, may be NULL const GrB_Matrix A, const GrB_Matrix B, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- // M, A, and B can be jumbled for this phase ASSERT (p_Cnvec != NULL) ; ASSERT (Ch_handle != NULL) ; ASSERT (Ch_size_handle != NULL) ; ASSERT (C_to_A_handle != NULL) ; ASSERT (C_to_B_handle != NULL) ; ASSERT_MATRIX_OK_OR_NULL (M, "M for emult phase0", GB0) ; ASSERT (!GB_ZOMBIES (M)) ; ASSERT (GB_JUMBLED_OK (M)) ; // pattern not accessed ASSERT (!GB_PENDING (M)) ; ASSERT_MATRIX_OK (A, "A for emult phase0", GB0) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (B)) ; // pattern not accessed ASSERT (!GB_PENDING (A)) ; ASSERT_MATRIX_OK (B, "B for emult phase0", GB0) ; ASSERT (!GB_ZOMBIES (B)) ; ASSERT (GB_JUMBLED_OK (A)) ; // pattern not accessed ASSERT (!GB_PENDING (B)) ; ASSERT (A->vdim == B->vdim) ; ASSERT (A->vlen == B->vlen) ; ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ; ASSERT (GB_IMPLIES (M != NULL, A->vlen == M->vlen)) ; //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- (*p_Cnvec) = 0 ; (*Ch_handle) = NULL ; (*Ch_size_handle) = 0 ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = NULL ; } (*C_to_A_handle) = NULL ; (*C_to_B_handle) = NULL ; ASSERT ((*C_sparsity) == GxB_SPARSE || (*C_sparsity) == GxB_HYPERSPARSE) ; const int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ; int64_t *restrict C_to_M = NULL ; size_t C_to_M_size = 0 ; int64_t *restrict C_to_A = NULL ; size_t C_to_A_size = 0 ; int64_t *restrict C_to_B = NULL ; size_t C_to_B_size = 0 ; //-------------------------------------------------------------------------- // get content of M, A, and B //-------------------------------------------------------------------------- int64_t n = A->vdim ; int64_t Anvec = A->nvec ; int64_t vlen = A->vlen ; const int64_t *restrict Ah = A->h ; bool A_is_hyper = (Ah != NULL) ; int64_t Bnvec = B->nvec ; const int64_t *restrict Bh = B->h ; bool B_is_hyper = (Bh != NULL) ; int64_t Mnvec = 0 ; const int64_t *restrict Mh = NULL ; bool M_is_hyper = false ; if (M != NULL) { Mnvec = M->nvec ; Mh = M->h ; M_is_hyper = (Mh != NULL) ; } //-------------------------------------------------------------------------- // determine how to construct the vectors of C //-------------------------------------------------------------------------- if (M != NULL) { //---------------------------------------------------------------------- // 8 cases to consider: A, B, M can each be hyper or sparse //---------------------------------------------------------------------- // Mask is present and not complemented if (A_is_hyper) { if (B_is_hyper) { if (M_is_hyper) { 
//---------------------------------------------------------- // (1) A hyper, B hyper, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Bh, Ah int64_t nvec = GB_IMIN (Anvec, Bnvec) ; nvec = GB_IMIN (nvec, Mnvec) ; if (nvec == Anvec) { Ch = Ah ; Ch_size = A->h_size ; } else if (nvec == Bnvec) { Ch = Bh ; Ch_size = B->h_size ; } else // (nvec == Mnvec) { Ch = Mh ; Ch_size = M->h_size ; } } else { //---------------------------------------------------------- // (2) A hyper, B hyper, M sparse: C hyper //---------------------------------------------------------- // Ch = smaller of Ah, Bh if (Anvec <= Bnvec) { Ch = Ah ; Ch_size = A->h_size ; } else { Ch = Bh ; Ch_size = B->h_size ; } } } else { if (M_is_hyper) { //---------------------------------------------------------- // (3) A hyper, B sparse, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Ah if (Anvec <= Mnvec) { Ch = Ah ; Ch_size = A->h_size ; } else { Ch = Mh ; Ch_size = M->h_size ; } } else { //---------------------------------------------------------- // (4) A hyper, B sparse, M sparse: C hyper //---------------------------------------------------------- Ch = Ah ; Ch_size = A->h_size ; } } } else { if (B_is_hyper) { if (M_is_hyper) { //---------------------------------------------------------- // (5) A sparse, B hyper, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Bh if (Bnvec <= Mnvec) { Ch = Bh ; Ch_size = B->h_size ; } else { Ch = Mh ; Ch_size = M->h_size ; } } else { //---------------------------------------------------------- // (6) A sparse, B hyper, M sparse: C hyper //---------------------------------------------------------- Ch = Bh ; Ch_size = B->h_size ; } } else { if (M_is_hyper) { //---------------------------------------------------------- // (7) A sparse, B sparse, M hyper: C hyper //---------------------------------------------------------- Ch = Mh ; Ch_size = M->h_size ; } else { //---------------------------------------------------------- // (8) A sparse, B sparse, M sparse: C sparse //---------------------------------------------------------- Ch = NULL ; } } } } else { //---------------------------------------------------------------------- // 4 cases to consider: A, B can be hyper or sparse //---------------------------------------------------------------------- // Mask is not present, or present and complemented. 
if (A_is_hyper) { if (B_is_hyper) { //-------------------------------------------------------------- // (1) A hyper, B hyper: C hyper //-------------------------------------------------------------- // Ch = smaller of Ah, Bh if (Anvec <= Bnvec) { Ch = Ah ; Ch_size = A->h_size ; } else { Ch = Bh ; Ch_size = B->h_size ; } } else { //-------------------------------------------------------------- // (2) A hyper, B sparse: C hyper //-------------------------------------------------------------- Ch = Ah ; Ch_size = A->h_size ; } } else { if (B_is_hyper) { //-------------------------------------------------------------- // (3) A sparse, B hyper: C hyper //-------------------------------------------------------------- Ch = Bh ; Ch_size = B->h_size ; } else { //-------------------------------------------------------------- // (4) A sparse, B sparse: C sparse //-------------------------------------------------------------- Ch = NULL ; } } } //-------------------------------------------------------------------------- // find Cnvec //-------------------------------------------------------------------------- int64_t Cnvec ; if (Ch == NULL) { // C is sparse (*C_sparsity) = GxB_SPARSE ; Cnvec = n ; } else { // C is hypersparse; one of A, B, or M are hypersparse ASSERT (A_is_hyper || B_is_hyper || M_is_hyper) ; (*C_sparsity) = GxB_HYPERSPARSE ; if (Ch == Ah) { Cnvec = Anvec ; } else if (Ch == Bh) { Cnvec = Bnvec ; } else // (Ch == Mh) { Cnvec = Mnvec ; } } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // construct C_to_M mapping //-------------------------------------------------------------------------- if (M_is_hyper && Ch != Mh) { // allocate C_to_M C_to_M = GB_MALLOC_WERK (Cnvec, int64_t, &C_to_M_size) ; if (C_to_M == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } // compute C_to_M ASSERT (Ch != NULL) ; const int64_t *restrict Mp = M->p ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < Cnvec ; k++) { int64_t pM, pM_end, kM = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Mh, Mp, vlen, &kM, Mnvec-1, j, &pM, &pM_end) ; C_to_M [k] = (pM < pM_end) ? kM : -1 ; } } //-------------------------------------------------------------------------- // construct C_to_A mapping //-------------------------------------------------------------------------- if (A_is_hyper && Ch != Ah) { // allocate C_to_A C_to_A = GB_MALLOC_WERK (Cnvec, int64_t, &C_to_A_size) ; if (C_to_A == NULL) { // out of memory GB_FREE_WERK (&C_to_M, C_to_M_size) ; return (GrB_OUT_OF_MEMORY) ; } // compute C_to_A ASSERT (Ch != NULL) ; const int64_t *restrict Ap = A->p ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < Cnvec ; k++) { int64_t pA, pA_end, kA = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Ah, Ap, vlen, &kA, Anvec-1, j, &pA, &pA_end) ; C_to_A [k] = (pA < pA_end) ? 
kA : -1 ; } } //-------------------------------------------------------------------------- // construct C_to_B mapping //-------------------------------------------------------------------------- if (B_is_hyper && Ch != Bh) { // allocate C_to_B C_to_B = GB_MALLOC_WERK (Cnvec, int64_t, &C_to_B_size) ; if (C_to_B == NULL) { // out of memory GB_FREE_WERK (&C_to_M, C_to_M_size) ; GB_FREE_WERK (&C_to_A, C_to_A_size) ; return (GrB_OUT_OF_MEMORY) ; } // compute C_to_B ASSERT (Ch != NULL) ; const int64_t *restrict Bp = B->p ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < Cnvec ; k++) { int64_t pB, pB_end, kB = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Bh, Bp, vlen, &kB, Bnvec-1, j, &pB, &pB_end) ; C_to_B [k] = (pB < pB_end) ? kB : -1 ; } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- (*p_Cnvec) = Cnvec ; (*Ch_handle) = Ch ; (*Ch_size_handle) = Ch_size ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = C_to_M ; (*C_to_M_size_handle) = C_to_M_size ; } (*C_to_A_handle) = C_to_A ; (*C_to_A_size_handle) = C_to_A_size ; (*C_to_B_handle) = C_to_B ; (*C_to_B_size_handle) = C_to_B_size ; //-------------------------------------------------------------------------- // The code below describes what the output contains: //-------------------------------------------------------------------------- #ifdef GB_DEBUG ASSERT (A != NULL) ; // A and B are always present ASSERT (B != NULL) ; int64_t jlast = -1 ; for (int64_t k = 0 ; k < Cnvec ; k++) { // C(:,j) is in the list, as the kth vector int64_t j ; if (Ch == NULL) { // C will be constructed as sparse j = k ; } else { // C will be constructed as hypersparse j = Ch [k] ; } // vectors j in Ch are sorted, and in the range 0:n-1 ASSERT (j >= 0 && j < n) ; ASSERT (j > jlast) ; jlast = j ; // see if A (:,j) exists if (C_to_A != NULL) { // A is hypersparse ASSERT (A_is_hyper) int64_t kA = C_to_A [k] ; ASSERT (kA >= -1 && kA < A->nvec) ; if (kA >= 0) { int64_t jA = A->h [kA] ; ASSERT (j == jA) ; } } else if (A_is_hyper) { // A is hypersparse, and Ch is a shallow copy of A->h ASSERT (Ch == A->h) ; } // see if B (:,j) exists if (C_to_B != NULL) { // B is hypersparse ASSERT (B_is_hyper) int64_t kB = C_to_B [k] ; ASSERT (kB >= -1 && kB < B->nvec) ; if (kB >= 0) { int64_t jB = B->h [kB] ; ASSERT (j == jB) ; } } else if (B_is_hyper) { // A is hypersparse, and Ch is a shallow copy of A->h ASSERT (Ch == B->h) ; } // see if M (:,j) exists if (Ch != NULL && M != NULL && Ch == M->h) { // Ch is the same as Mh ASSERT (M != NULL) ; ASSERT (M->h != NULL) ; ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ; ASSERT (C_to_M == NULL) ; } else if (C_to_M != NULL) { // M is present and hypersparse ASSERT (M != NULL) ; ASSERT (M->h != NULL) ; int64_t kM = C_to_M [k] ; ASSERT (kM >= -1 && kM < M->nvec) ; if (kM >= 0) { int64_t jM = M->h [kM] ; ASSERT (j == jM) ; } } else { // M is not present, or in sparse form ASSERT (M == NULL || M->h == NULL) ; } } #endif return (GrB_SUCCESS) ; }
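//------------------------------------------------------------------------------
// illustrative sketch: the -1 convention used for C_to_A, C_to_B, and C_to_M
//------------------------------------------------------------------------------

// A minimal standalone version of the lookup the loops above perform via
// GB_lookup (hypothetical helper; the real GB_lookup also returns the
// pA/pA_end range from Ap). Given a sorted hyperlist Ah of length Anvec, it
// returns the position kA with Ah [kA] == j, or -1 when vector j does not
// appear in A.

#if 0   // example only
#include <stdint.h>

static inline int64_t hyper_lookup (const int64_t *Ah, int64_t Anvec,
    int64_t j)
{
    int64_t lo = 0, hi = Anvec - 1 ;
    while (lo <= hi)
    {
        int64_t mid = lo + (hi - lo) / 2 ;
        if (Ah [mid] == j) return (mid) ;       // j is the kA-th vector of A
        else if (Ah [mid] < j) lo = mid + 1 ;
        else hi = mid - 1 ;
    }
    return (-1) ;                               // j does not appear in A
}
#endif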
hard.h
#pragma once template <class Tp> class VectorNoClear : public std::vector<Tp> { PS::U32 siz; public: VectorNoClear() : std::vector<Tp> (4) { siz = 0; } //VectorNoClear(std::vector<Tp> & vec) : std::vector<Tp>(vec) { siz = std::vector<Tp>::size(); } //VectorNoClear(VectorNoClear<Tp> & vec) { // if ( siz < vec.siz ) resize(vec.siz); // for (PS::S32 i=0; i<vec.siz; i++) std::vector<Tp>::at(i) = vec.at(i); // siz = vec.siz; //} void clear() { siz = 0; } void push_back(const Tp & pi) { if ( siz < std::vector<Tp>::size() ) { std::vector<Tp>::at(siz) = pi; } else { std::vector<Tp>::push_back(pi); } siz ++; //assert ( siz <= std::vector<Tp>::size() ); } void push_back(Tp && pi) { if ( siz < std::vector<Tp>::size() ) { //std::vector<Tp>::at(siz) = pi; this->at(siz) = pi; } else { std::vector<Tp>::push_back(pi); } siz ++; //assert ( siz <= std::vector<Tp>::size() ); } void resize(const PS::U32 n) { if ( n > std::vector<Tp>::size() ){ std::vector<Tp>::resize(n); } siz = n; } void reserve (const PS::U32 n) { if ( n > std::vector<Tp>::capacity() ){ std::vector<Tp>::reserve(n); } } PS::U32 size() const { return siz; } PS::U32 capacity() const { return std::vector<Tp>::capacity(); } }; class HardSystem{ public: std::vector<PS::S32> list_iso; //std::vector<std::vector<std::pair<bool,PS::S32> > > list_multi; //std::vector<std::vector<FPHard> > ptcl_multi; VectorNoClear<std::vector<std::pair<bool,PS::S32> > > list_multi; VectorNoClear<VectorNoClear<FPHard> > ptcl_multi; std::map<PS::S32,PS::S32> mp_cluster; // cluster ID -> cluster adress in HardSystem std::vector<Collision> collision_list; std::vector<std::pair<PS::S32,PS::S32> > frag_list; PS::S32 n_col; PS::S32 n_frag; PS::F64 edisp; PS::F64 edisp_d; //static PS::F64 f; PS::S32 getNumberOfClusterLocal() const { return ptcl_multi.size(); } PS::S32 getNumberOfClusterGlobal() const { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::S32 n_cluster_loc = ptcl_multi.size(); return PS::Comm::getSum(n_cluster_loc); #else return ptcl_multi.size(); #endif } PS::S32 getNumberOfIsolatedParticleLocal() const { return list_iso.size(); } PS::S32 getNumberOfIsolatedParticleGlobal() const { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::S32 n_ptcl_loc = list_iso.size(); return PS::Comm::getSum(n_ptcl_loc); #else return list_iso.size(); #endif } PS::S32 getNumberOfParticleLocal(){ PS::S32 n = 0; PS::S32 size = ptcl_multi.size(); for ( PS::S32 i=0; i<size; i++ ) n += ptcl_multi[i].size(); n += list_iso.size(); return n; } PS::S32 getNumberOfParticleGlobal() { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::S32 n_ptcl_loc = getNumberOfParticleLocal(); return PS::Comm::getSum(n_ptcl_loc); #else return getNumberOfParticleLocal(); #endif } PS::S32 getNumberOfParticleInLargestClusterLocal(){ PS::S32 n = 0; PS::S32 size = ptcl_multi.size(); for ( PS::S32 i=0; i<size; i++ ) if ( n < size ) n = ptcl_multi[i].size(); return n; } PS::S32 getNumberOfParticleInLargestClusterGlobal() { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::S32 n_loc = getNumberOfParticleInLargestClusterLocal(); return PS::Comm::getMaxValue(n_loc); #else return getNumberOfParticleInLargestClusterLocal(); #endif } PS::S32 getNumberOfFragmentLocal() const { return n_frag; } PS::S32 getNumberOfFragmentGlobal() const { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::S32 n_frag_ = n_frag; return PS::Comm::getSum(n_frag_); #else return n_frag; #endif } PS::S32 getNumberOfCollisionLocal() const { return n_col; } PS::S32 getNumberOfCollisionGlobal() const { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::S32 n_col_ = n_col; return 
PS::Comm::getSum(n_col_); #else return n_col; #endif } PS::F64 getEnergyDissipationLocal() const { return edisp; } PS::F64 getEnergyDissipationGlobal() const { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::F64 edisp_ = edisp; return PS::Comm::getSum(edisp_); #else return edisp; #endif } PS::F64 getHardEnergyDissipationLocal() const { return edisp_d; } PS::F64 getHardEnergyDissipationGlobal() const { #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL PS::F64 edisp_d_ = edisp_d; return PS::Comm::getSum(edisp_d_); #else return edisp_d; #endif } void showParticleID() const { PS::S32 size = ptcl_multi.size(); for ( PS::S32 i=0; i<size; i++ ){ bool flag = ptcl_multi.at(i).at(0).inDomain; std::cout << "Rank " << PS::Comm::getRank() << " Cluster " << ptcl_multi.at(i).at(0).id_cluster << " (" << flag << "): "; PS::S32 sizei = ptcl_multi.at(i).size(); for ( PS::S32 j=0; j<sizei; j++ ){ std::cout << " " << ptcl_multi.at(i).at(j).id; assert( flag == ptcl_multi.at(i).at(j).inDomain ); } std::cout << std::endl; } } void clear(){ list_iso.clear(); list_multi.clear(); ptcl_multi.clear(); mp_cluster.clear(); collision_list.clear(); frag_list.clear(); n_col = n_frag = 0; edisp = edisp_d = 0.; } template <class Tpsys, class Tpsys2> PS::S32 makeList(Tpsys & pp, Tpsys2 & ex_pp); template <class Tpsys, class Tpsys2, class NL, class NL2> PS::S32 timeIntegrate(Tpsys & pp, Tpsys2 & ex_pp, NL & NList, NL2 & ex_NList, const PS::S32 istep); static void rewriteFragmentID(PS::S32 * & id_frag_list, Collision * & col_list, PS::S32 n_col_tot, PS::S32 n_frag_tot, PS::S32 & id_next); template <class Tpsys, class Tpsys2> PS::S32 addFragment2ParticleSystem(Tpsys & pp, Tpsys2 & ex_pp, PS::S32 & id_next, std::ofstream & fp); }; template <class Tpsys, class Tpsys2> inline PS::S32 HardSystem::makeList(Tpsys & pp, Tpsys2 & ex_pp) { const PS::S32 n_pp = pp.getNumberOfParticleLocal(); const PS::S32 n_ex_pp = ex_pp.getNumberOfParticleLocal(); PS::S32 n_ptcl_loc = 0; PS::S32 n_cluster_loc = 0; PS::U32 tmp = 0; for ( PS::S32 i=0; i<n_pp; i++ ){ if ( pp[i].neighbor ){ if ( pp[i].isSent ) continue; auto itr = mp_cluster.find(pp[i].id_cluster); if ( itr == mp_cluster.end() ){ mp_cluster[pp[i].id_cluster] = tmp; list_multi.push_back(std::vector<std::pair<bool,PS::S32> >{std::make_pair(true, i)}); tmp ++; assert( list_multi.size() == tmp ); } else { assert( mp_cluster.at(pp[i].id_cluster) == itr->second ); list_multi.at(itr->second).push_back(std::make_pair(true, i)); } n_ptcl_loc ++; } else { list_iso.push_back(i); n_ptcl_loc ++; } } for ( PS::S32 i=0; i<n_ex_pp; i++ ){ assert( ex_pp[i].neighbor > 0 ); assert( ex_pp[i].isSent ); assert( !ex_pp[i].inDomain ); auto itr = mp_cluster.find(ex_pp[i].id_cluster); if ( itr == mp_cluster.end() ){ mp_cluster[ex_pp[i].id_cluster] = tmp; list_multi.push_back(std::vector<std::pair<bool,PS::S32> >{std::make_pair(false, i)}); tmp ++; assert( list_multi.size() == tmp ); } else { assert( mp_cluster.at(ex_pp[i].id_cluster) == itr->second ); list_multi.at(itr->second).push_back(std::make_pair(false, i)); } n_ptcl_loc ++; } n_cluster_loc = tmp; assert( list_multi.size() == tmp ); ptcl_multi.resize(n_cluster_loc); return n_ptcl_loc; } #if 0 template <class Tpsys, class Tpsys2, class NL, class NL2> inline PS::S32 HardSystem::timeIntegrate(Tpsys & pp, Tpsys2 & ex_pp, NL & NList, NL2 & ex_NList, const PS::S32 istep) { PS::S32 n_all = list_multi.size() + list_iso.size(); PS::S32 n_ptcl_loc = 0; #pragma omp parallel for reduction(+:n_ptcl_loc) schedule (dynamic) for ( PS::S32 ii=0; ii<n_all; ii++ ){ PS::S32 size = 
list_multi.size(); if ( ii<size ){ PS::S32 i = ii; //for ( PS::S32 i=0; i<size; i++ ){ PS::S32 n_p = list_multi.at(i).size(); PS::S32 id_cluster = 0; std::map<PS::S32, PS::S32> id_map; ptcl_multi[i].clear(); id_map.clear(); // Add Particle To Hard System ptcl_multi[i].reserve(n_p); for ( PS::S32 j=0; j<n_p; j++ ){ std::pair<bool,PS::S32> adr = list_multi.at(i).at(j); PS::S32 id_loc = adr.second; if ( adr.first ) { ptcl_multi[i].push_back(FPHard(pp[id_loc])); ptcl_multi[i][j].copyList(NList[id_loc]); } else { ptcl_multi[i].push_back(FPHard(ex_pp[id_loc])); ptcl_multi[i][j].copyList(ex_NList[id_loc]); } if ( j==0 ) id_cluster = ptcl_multi[i][j].id_cluster; assert ( ptcl_multi[i][j].id_cluster == id_cluster ); id_map[ptcl_multi[i][j].id] = j; } // Make Neighbor List #ifdef TEST_PTCL for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map, ptcl_multi[i]); #else for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map); #endif PS::S32 n_col_tmp = 0; PS::S32 n_frag_tmp = 0; PS::F64 edisp_tmp = 0.; PS::F64 edisp_d_tmp = 0.; timeIntegrate_multi(ptcl_multi[i], 0., FPGrav::dt_tree, n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp); if ( n_col_tmp > 0 ){ #pragma omp critical { n_col += n_col_tmp; n_frag += n_frag_tmp; edisp += edisp_tmp; edisp_d += edisp_d_tmp; for ( PS::S32 j=0; j<n_col_tmp; j++ ) collision_list.push_back(collision_list_tmp.at(j)); for ( PS::S32 j=0; j<n_frag_tmp; j++ ) frag_list.push_back(std::make_pair(i, ptcl_multi[i].size()-n_frag_tmp+j)); } } PS::S32 sizei = ptcl_multi[i].size(); for ( PS::S32 j=0; j<sizei; j++ ) { ptcl_multi[i][j].n_cluster = ptcl_multi[i].size(); ptcl_multi[i][j].resetTime(); } for ( PS::S32 j=0; j<sizei-n_frag_tmp; j++ ){ std::pair<bool,PS::S32> adr = list_multi.at(i).at(j); PS::S32 id_loc = adr.second; if ( adr.first ) { if ( !ptcl_multi[i][j].isDead ) assert ( pp[id_loc].id == ptcl_multi[i][j].id ); pp[id_loc] = FPGrav(ptcl_multi[i][j]); pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size(); } else { if ( !ptcl_multi[i][j].isDead ) assert ( ex_pp[id_loc].id == ptcl_multi[i][j].id ); ex_pp[id_loc] = FPGrav(ptcl_multi[i][j]); ex_pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size(); } n_ptcl_loc ++; } } else { PS::S32 i = ii - list_multi.size(); //#pragma omp parallel for reduction(+:n_ptcl_loc) //for ( PS::S32 i=0; i<list_iso.size(); i++ ){ if ( pp[list_iso[i]].getEccentricity() < 0.8 && FPGrav::eps2 == 0. 
){ timeIntegrateKepler_isolated(pp[list_iso[i]], (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree); } else { FPHard pi = FPHard(pp[list_iso[i]]); timeIntegrate_isolated(pi, 0., FPGrav::dt_tree); pi.resetTime(); pp[list_iso[i]] = FPGrav(pi); } n_ptcl_loc ++; pp[list_iso[i]].n_cluster = 1; } } return n_ptcl_loc; } #else template <class Tpsys, class Tpsys2, class NL, class NL2> inline PS::S32 HardSystem::timeIntegrate(Tpsys & pp, Tpsys2 & ex_pp, NL & NList, NL2 & ex_NList, const PS::S32 istep) { PS::S32 n_ptcl_loc = 0; const PS::S32 large_cluster_size = 64; PS::S32 size = list_multi.size(); std::vector<PS::S32> small_cluster, large_cluster; small_cluster.clear(); large_cluster.clear(); for ( PS::S32 i=0; i<size; i++ ){ if ( list_multi.at(i).size() < large_cluster_size ){ small_cluster.push_back(i); } else { large_cluster.push_back(i); } } PS::S32 n_small = small_cluster.size(); PS::S32 n_large = large_cluster.size(); PS::S32 n_all = n_small + list_iso.size(); /////////////////////////////////// /// Integrate Large Cluster /// /////////////////////////////////// for ( PS::S32 ii=0; ii<n_large; ii++ ){ PS::S32 i = large_cluster.at(ii); PS::S32 n_p = list_multi.at(i).size(); PS::S32 id_cluster = 0; std::map<PS::S32, PS::S32> id_map; ptcl_multi[i].clear(); id_map.clear(); assert( large_cluster_size <= n_p ); // Add Particle To Hard System ptcl_multi[i].reserve(n_p); for ( PS::S32 j=0; j<n_p; j++ ){ std::pair<bool,PS::S32> adr = list_multi.at(i).at(j); PS::S32 id_loc = adr.second; if ( adr.first ) { ptcl_multi[i].push_back(FPHard(pp[id_loc])); ptcl_multi[i][j].copyList(NList[id_loc]); } else { ptcl_multi[i].push_back(FPHard(ex_pp[id_loc])); ptcl_multi[i][j].copyList(ex_NList[id_loc]); } if ( j==0 ) id_cluster = ptcl_multi[i][j].id_cluster; assert ( ptcl_multi[i][j].id_cluster == id_cluster ); id_map[ptcl_multi[i][j].id] = j; } // Make Neighbor List #ifdef TEST_PTCL #pragma omp parallel for for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map, ptcl_multi[i]); #else #pragma omp parallel for for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map); #endif PS::S32 n_col_tmp = 0; PS::S32 n_frag_tmp = 0; PS::F64 edisp_tmp = 0.; PS::F64 edisp_d_tmp = 0.; std::vector<Collision> collision_list_tmp; timeIntegrate_multi_omp(ptcl_multi[i], 0., FPGrav::dt_tree, n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp); if ( n_col_tmp > 0 ){ n_col += n_col_tmp; n_frag += n_frag_tmp; edisp += edisp_tmp; edisp_d += edisp_d_tmp; for ( PS::S32 j=0; j<n_col_tmp; j++ ) collision_list.push_back(collision_list_tmp.at(j)); for ( PS::S32 j=0; j<n_frag_tmp; j++ ) frag_list.push_back(std::make_pair(i, ptcl_multi[i].size()-n_frag_tmp+j)); } PS::S32 sizei = ptcl_multi[i].size(); #pragma omp parallel for for ( PS::S32 j=0; j<sizei; j++ ) { ptcl_multi[i][j].n_cluster = ptcl_multi[i].size(); ptcl_multi[i][j].resetTime(); } #pragma omp parallel for reduction(+:n_ptcl_loc) for ( PS::S32 j=0; j<sizei-n_frag_tmp; j++ ){ std::pair<bool,PS::S32> adr = list_multi.at(i).at(j); PS::S32 id_loc = adr.second; if ( adr.first ) { if ( !ptcl_multi[i][j].isDead ) assert ( pp[id_loc].id == ptcl_multi[i][j].id ); pp[id_loc] = FPGrav(ptcl_multi[i][j]); pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size(); } else { if ( !ptcl_multi[i][j].isDead ) assert ( ex_pp[id_loc].id == ptcl_multi[i][j].id ); ex_pp[id_loc] = FPGrav(ptcl_multi[i][j]); ex_pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size(); } n_ptcl_loc ++; } } ////////////////////////////////////////////////////// /// Integrate Small Cluster & Isolated 
Particle /// ////////////////////////////////////////////////////// #pragma omp parallel for reduction(+:n_ptcl_loc) schedule (dynamic) for ( PS::S32 ii=0; ii<n_all; ii++ ){ if ( ii<n_small ){ PS::S32 i = small_cluster.at(ii); PS::S32 n_p = list_multi.at(i).size(); PS::S32 id_cluster = 0; std::map<PS::S32, PS::S32> id_map; ptcl_multi[i].clear(); id_map.clear(); assert( large_cluster_size > n_p ); // Add Particle To Hard System ptcl_multi[i].reserve(n_p); for ( PS::S32 j=0; j<n_p; j++ ){ std::pair<bool,PS::S32> adr = list_multi.at(i).at(j); PS::S32 id_loc = adr.second; if ( adr.first ) { ptcl_multi[i].push_back(FPHard(pp[id_loc])); ptcl_multi[i][j].copyList(NList[id_loc]); } else { ptcl_multi[i].push_back(FPHard(ex_pp[id_loc])); ptcl_multi[i][j].copyList(ex_NList[id_loc]); } if ( j==0 ) id_cluster = ptcl_multi[i][j].id_cluster; assert ( ptcl_multi[i][j].id_cluster == id_cluster ); id_map[ptcl_multi[i][j].id] = j; } // Make Neighbor List #ifdef TEST_PTCL for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map, ptcl_multi[i]); #else for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map); #endif PS::S32 n_col_tmp = 0; PS::S32 n_frag_tmp = 0; PS::F64 edisp_tmp = 0.; PS::F64 edisp_d_tmp = 0.; std::vector<Collision> collision_list_tmp; //timeIntegrate_multi(ptcl_multi[i], (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree, f, // n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp); timeIntegrate_multi(ptcl_multi[i], 0., FPGrav::dt_tree, n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp); if ( n_col_tmp > 0 ){ #pragma omp critical { n_col += n_col_tmp; n_frag += n_frag_tmp; edisp += edisp_tmp; edisp_d += edisp_d_tmp; for ( PS::S32 j=0; j<n_col_tmp; j++ ) collision_list.push_back(collision_list_tmp.at(j)); for ( PS::S32 j=0; j<n_frag_tmp; j++ ) frag_list.push_back(std::make_pair(i, ptcl_multi[i].size()-n_frag_tmp+j)); } } PS::S32 sizei = ptcl_multi[i].size(); for ( PS::S32 j=0; j<sizei; j++ ) { ptcl_multi[i][j].n_cluster = ptcl_multi[i].size(); ptcl_multi[i][j].resetTime(); } for ( PS::S32 j=0; j<sizei-n_frag_tmp; j++ ){ std::pair<bool,PS::S32> adr = list_multi.at(i).at(j); PS::S32 id_loc = adr.second; if ( adr.first ) { if ( !ptcl_multi[i][j].isDead ) assert ( pp[id_loc].id == ptcl_multi[i][j].id ); pp[id_loc] = FPGrav(ptcl_multi[i][j]); pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size(); } else { if ( !ptcl_multi[i][j].isDead ) assert ( ex_pp[id_loc].id == ptcl_multi[i][j].id ); ex_pp[id_loc] = FPGrav(ptcl_multi[i][j]); ex_pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size(); } n_ptcl_loc ++; } } else { PS::S32 i = ii - n_small; if ( pp[list_iso[i]].getEccentricity() < 0.8 && FPGrav::eps2 == 0. 
){ timeIntegrateKepler_isolated(pp[list_iso[i]], (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree); } else { FPHard pi = FPHard(pp[list_iso[i]]); timeIntegrate_isolated(pi, 0., FPGrav::dt_tree); //timeIntegrate_isolated(pi, (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree); pi.resetTime(); pp[list_iso[i]] = FPGrav(pi); } n_ptcl_loc ++; pp[list_iso[i]].n_cluster = 1; } } return n_ptcl_loc; } #endif inline void HardSystem::rewriteFragmentID(PS::S32 * & id_frag_list, Collision * & col_list, PS::S32 n_col_tot, PS::S32 n_frag_tot, PS::S32 & id_next) { std::map<PS::S32, PS::S32> id_old2new; id_old2new.clear(); for ( PS::S32 i=0; i<n_col_tot; i++ ){ PS::S32 n_fragi = col_list[i].getNumberOfFragment(); PS::S32 id_fragi = col_list[i].getFragmentID(); if ( n_fragi == 0 ) continue; for ( PS::S32 j=0; j<n_fragi; j++ ){ id_old2new[id_fragi - j] = id_next; id_next ++; } } for ( PS::S32 i=0; i<n_frag_tot; i++ ) if ( id_frag_list[i] < 0 ) id_frag_list[i] = id_old2new.at(id_frag_list[i]); for ( PS::S32 i=0; i<n_col_tot; i++ ) col_list[i].setNewFragmentID(id_old2new); } template <class Tpsys, class Tpsys2> inline PS::S32 HardSystem::addFragment2ParticleSystem(Tpsys & pp, Tpsys2 & ex_pp, PS::S32 & id_next, std::ofstream & fp) { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); PS::S32 * n_col_list = nullptr; PS::S32 * n_frag_list = nullptr; Collision * col_list_tot = nullptr; PS::S32 * id_frag_list = nullptr; PS::S32 * id_frag_loc = nullptr; PS::S32 * col_recv = nullptr; PS::S32 * frag_recv = nullptr; id_frag_loc = new PS::S32[n_frag]; for ( PS::S32 i=0; i<n_frag; i++ ){ std::pair<PS::S32, PS::S32> adr = frag_list.at(i); id_frag_loc[i] = ptcl_multi[adr.first][adr.second].id; } if ( PS::Comm::getRank() == 0 ){ n_col_list = new PS::S32[n_proc]; n_frag_list = new PS::S32[n_proc]; col_recv = new PS::S32[n_proc]; frag_recv = new PS::S32[n_proc]; } // Send Number of Collision & Fragments #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Gather(&n_col, 1, PS::GetDataType(n_col), n_col_list, 1, PS::GetDataType(*n_col_list), 0, MPI_COMM_WORLD); MPI_Gather(&n_frag, 1, PS::GetDataType(n_frag), n_frag_list, 1, PS::GetDataType(*n_frag_list), 0, MPI_COMM_WORLD); #else n_col_list[0] = n_col; n_frag_list[0] = n_frag; #endif //PS::Comm::gather(&n_col, 1, n_col_list); //PS::Comm::gather(&n_frag, 1, n_frag_list); PS::S32 n_col_tot = 0; PS::S32 n_frag_tot = 0; if ( PS::Comm::getRank() == 0 ){ PS::S32 tmp_col = 0; PS::S32 tmp_frag = 0; for ( PS::S32 i=0; i<n_proc; i++ ){ col_recv[i] = tmp_col; frag_recv[i] = tmp_frag; tmp_col += n_col_list[i]; tmp_frag += n_frag_list[i]; } //col_recv[n_proc] = tmp_col; //frag_recv[n_proc] = tmp_frag; n_col_tot = tmp_col; n_frag_tot = tmp_frag; col_list_tot = new Collision[n_col_tot]; id_frag_list = new PS::S32[n_frag_tot]; } // Send Collision Information & Fragments ID #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Gatherv(&collision_list[0], n_col, PS::GetDataType(collision_list[0]), col_list_tot, n_col_list, col_recv, PS::GetDataType(*col_list_tot), 0, MPI_COMM_WORLD); MPI_Gatherv(id_frag_loc, n_frag, PS::GetDataType(*id_frag_loc), id_frag_list, n_frag_list, frag_recv, PS::GetDataType(*id_frag_list), 0, MPI_COMM_WORLD); #else for(PS::S32 i=0; i<n_col; i++) col_list_tot[i] = collision_list[i]; for(PS::S32 i=0; i<n_frag; i++) id_frag_list[i] = id_frag_loc[i]; #endif //PS::Comm::gatherV(&collision_list[0], n_col, col_list_tot, n_col_list, col_recv); //PS::Comm::gatherV(id_frag_loc, n_frag, id_frag_list, n_frag_list, frag_recv); // Rewrite Fragments ID if ( PS::Comm::getRank() == 0 ){ 
rewriteFragmentID(id_frag_list, col_list_tot, n_col_tot, n_frag_tot, id_next); } // Return Fragments ID #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Scatterv(id_frag_list, n_frag_list, frag_recv, PS::GetDataType(*id_frag_list), id_frag_loc, n_frag, PS::GetDataType(*id_frag_loc), 0, MPI_COMM_WORLD); #else for(int i=0; i<n_frag_list[0]; i++) id_frag_loc[i] = id_frag_list[i]; #endif //PS::Comm::scatterV(id_frag_list, n_frag_list, frag_recv, id_frag_loc, n_frag); assert( (PS::S32)(frag_list.size()) == n_frag ); for ( PS::S32 i=0; i<n_frag; i++ ){ std::pair<PS::S32, PS::S32> adr = frag_list.at(i); if ( ptcl_multi[adr.first][adr.second].isMerged ) { PS::S32 sizef = list_multi.at(adr.first).size(); for ( PS::S32 j=0; j<sizef; j++ ) { if ( ptcl_multi[adr.first][j].id == ptcl_multi[adr.first][adr.second].id && j != adr.second ) { std::pair<bool,PS::S32> adr_j = list_multi.at(adr.first).at(j); if ( adr_j.first ) { pp[adr_j.second].id = id_frag_loc[i]; assert ( pp[adr_j.second].isDead ); } else { ex_pp[adr_j.second].id = id_frag_loc[i]; assert ( ex_pp[adr_j.second].isDead ); } ptcl_multi[adr.first][j].id = id_frag_loc[i]; assert ( ptcl_multi[adr.first][j].isDead ); } } } ptcl_multi[adr.first][adr.second].id = id_frag_loc[i]; pp.addOneParticle(ptcl_multi[adr.first][adr.second]); assert( ptcl_multi[adr.first][adr.second].time_c == 0. ); } PS::Comm::broadcast(&id_next, 1); if ( PS::Comm::getRank() == 0 ){ // Output Collision Information for ( PS::S32 i=0; i<n_col_tot; i++ ) col_list_tot[i].write2File(fp); delete [] n_col_list; delete [] n_frag_list; delete [] col_list_tot; delete [] id_frag_list; delete [] col_recv; delete [] frag_recv; } delete [] id_frag_loc; return n_frag_tot; }
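A minimal, self-contained sketch of the two-phase gather used in addFragment2ParticleSystem above: per-rank counts are collected with MPI_Gather, the root builds displacements from them, and the variable-length payloads follow via MPI_Gatherv. All names and payload contents below are illustrative, not part of the simulation code; compile with mpicc.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, n_proc;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &n_proc);

    /* Each rank owns a different number of items (cf. n_frag per rank). */
    int n_loc = rank + 1;
    int *payload = (int *) malloc(n_loc * sizeof(int));
    for (int i = 0; i < n_loc; i++) payload[i] = 100 * rank + i;

    int *counts = NULL, *displs = NULL, *recv_buf = NULL;
    if (rank == 0) {
        counts = (int *) malloc(n_proc * sizeof(int));
        displs = (int *) malloc(n_proc * sizeof(int));
    }

    /* Phase 1: gather one count per rank (cf. n_frag_list). */
    MPI_Gather(&n_loc, 1, MPI_INT, counts, 1, MPI_INT, 0, MPI_COMM_WORLD);

    int total = 0;
    if (rank == 0) {
        for (int i = 0; i < n_proc; i++) { displs[i] = total; total += counts[i]; }
        recv_buf = (int *) malloc(total * sizeof(int));
    }

    /* Phase 2: gather the variable-length payloads (cf. id_frag_list). */
    MPI_Gatherv(payload, n_loc, MPI_INT,
                recv_buf, counts, displs, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        for (int i = 0; i < total; i++) printf("%d ", recv_buf[i]);
        printf("\n");
        free(counts); free(displs); free(recv_buf);
    }
    free(payload);
    MPI_Finalize();
    return 0;
}

The return trip in the source reuses the same counts and displacements with MPI_Scatterv, which is why they are kept alive on rank 0 until the rewritten fragment IDs have been distributed.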
activations.c
#include "activations.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> char *get_activation_string(ACTIVATION a) { switch(a){ case LOGISTIC: return "logistic"; case LOGGY: return "loggy"; case RELU: return "relu"; case ELU: return "elu"; case SELU: return "selu"; case RELIE: return "relie"; case RAMP: return "ramp"; case LINEAR: return "linear"; case TANH: return "tanh"; case PLSE: return "plse"; case LEAKY: return "leaky"; case STAIR: return "stair"; case HARDTAN: return "hardtan"; case LHTAN: return "lhtan"; default: break; } return "relu"; } ACTIVATION get_activation(char *s) { if (strcmp(s, "logistic")==0) return LOGISTIC; if (strcmp(s, "loggy")==0) return LOGGY; if (strcmp(s, "relu")==0) return RELU; if (strcmp(s, "elu")==0) return ELU; if (strcmp(s, "selu") == 0) return SELU; if (strcmp(s, "relie")==0) return RELIE; if (strcmp(s, "plse")==0) return PLSE; if (strcmp(s, "hardtan")==0) return HARDTAN; if (strcmp(s, "lhtan")==0) return LHTAN; if (strcmp(s, "linear")==0) return LINEAR; if (strcmp(s, "ramp")==0) return RAMP; if (strcmp(s, "leaky")==0) return LEAKY; if (strcmp(s, "tanh")==0) return TANH; if (strcmp(s, "stair")==0) return STAIR; fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s); return RELU; } float activate(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate(x); case LOGISTIC: return logistic_activate(x); case LOGGY: return loggy_activate(x); case RELU: return relu_activate(x); case ELU: return elu_activate(x); case SELU: return selu_activate(x); case RELIE: return relie_activate(x); case RAMP: return ramp_activate(x); case LEAKY: return leaky_activate(x); case TANH: return tanh_activate(x); case PLSE: return plse_activate(x); case STAIR: return stair_activate(x); case HARDTAN: return hardtan_activate(x); case LHTAN: return lhtan_activate(x); } return 0; } void activate_array(float *x, const int n, const ACTIVATION a) { if(a == LINEAR){ /* Do nothing */ }else if(a == LEAKY){ #pragma omp parallel for for(int i = 0; i < n; ++i){ x[i] = leaky_activate(x[i]); } }else if(a == LOGISTIC){ #pragma omp parallel for for(int i = 0; i < n; ++i){ x[i] = logistic_activate(x[i]); } }else{ #pragma omp parallel for for(int i = 0; i < n; ++i){ x[i] = activate(x[i], a); } } } float gradient(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient(x); case LOGISTIC: return logistic_gradient(x); case LOGGY: return loggy_gradient(x); case RELU: return relu_gradient(x); case ELU: return elu_gradient(x); case SELU: return selu_gradient(x); case RELIE: return relie_gradient(x); case RAMP: return ramp_gradient(x); case LEAKY: return leaky_gradient(x); case TANH: return tanh_gradient(x); case PLSE: return plse_gradient(x); case STAIR: return stair_gradient(x); case HARDTAN: return hardtan_gradient(x); case LHTAN: return lhtan_gradient(x); } return 0; } void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta) { int i; for(i = 0; i < n; ++i){ delta[i] *= gradient(x[i], a); } }
pageRank.c
// ----------------------------------------------------------------------------- // // "00_AccelGraph" // // ----------------------------------------------------------------------------- // Copyright (c) 2014-2019 All rights reserved // ----------------------------------------------------------------------------- // Author : Abdullah Mughrabi // Email : atmughra@ncsu.edu||atmughrabi@gmail.com // File : pageRank.c // Create : 2019-09-28 14:41:30 // Revise : 2019-09-28 15:34:11 // Editor : Abdullah Mughrabi // ----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <math.h> #include <omp.h> #include "timer.h" #include "myMalloc.h" #include "boolean.h" #include "arrayQueue.h" #include "bitmap.h" #include "worklist.h" #include "graphConfig.h" #include "fixedPoint.h" #include "quantization.h" #include "reorder.h" #include "graphCSR.h" #include "graphGrid.h" #include "graphAdjArrayList.h" #include "graphAdjLinkedList.h" #include "pageRank.h" // ******************************************************************************************** // *************** Stats DataStructure ************** // ******************************************************************************************** struct PageRankStats *newPageRankStatsGraphCSR(struct GraphCSR *graph) { uint32_t v; struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats)); stats->damp = Damp; stats->base_pr = (1.0f - stats->damp); stats->iterations = 0; stats->num_vertices = graph->num_vertices; stats->time_total = 0.0; stats->error_total = 0.0; stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));; stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));; #pragma omp parallel for default(none) private(v) shared(stats) for(v = 0; v < stats->num_vertices; v++) { stats->pageRanks[v] = stats->base_pr; stats->realRanks[v] = v; } return stats; } struct PageRankStats *newPageRankStatsGraphGrid(struct GraphGrid *graph) { uint32_t v; struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats)); stats->damp = Damp; stats->base_pr = (1.0f - stats->damp); stats->iterations = 0; stats->num_vertices = graph->num_vertices; stats->time_total = 0.0; stats->error_total = 0.0; stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));; stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));; #pragma omp parallel for default(none) private(v) shared(stats) for(v = 0; v < stats->num_vertices; v++) { stats->pageRanks[v] = stats->base_pr; stats->realRanks[v] = v; } return stats; } struct PageRankStats *newPageRankStatsGraphAdjArrayList(struct GraphAdjArrayList *graph) { uint32_t v; struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats)); stats->damp = Damp; stats->base_pr = (1.0f - stats->damp); stats->iterations = 0; stats->num_vertices = graph->num_vertices; stats->time_total = 0.0; stats->error_total = 0.0; stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));; stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));; #pragma omp parallel for default(none) private(v) shared(stats) for(v = 0; v < stats->num_vertices; v++) { stats->pageRanks[v] = stats->base_pr; stats->realRanks[v] = v; } return stats; } struct PageRankStats *newPageRankStatsGraphAdjLinkedList(struct GraphAdjLinkedList *graph) { uint32_t v; 
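/* Same bookkeeping as the CSR, Grid, and AdjArrayList constructors above:
   every vertex starts at base_pr = (1 - damp) and realRanks holds the
   identity mapping, both filled in parallel. */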
struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats)); stats->damp = Damp; stats->base_pr = (1.0f - stats->damp); stats->iterations = 0; stats->num_vertices = graph->num_vertices; stats->time_total = 0.0; stats->error_total = 0.0; stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));; stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));; #pragma omp parallel for default(none) private(v) shared(stats) for(v = 0; v < stats->num_vertices; v++) { stats->pageRanks[v] = stats->base_pr; stats->realRanks[v] = v; } return stats; } void freePageRankStats(struct PageRankStats *stats) { if(stats) { if(stats->realRanks) free(stats->realRanks); if(stats->pageRanks) free(stats->pageRanks); free(stats); } } // ******************************************************************************************** // *************** Auxilary functions ************** // ******************************************************************************************** void addAtomicFloat(float *num, float value) { float newV, oldV; uint32_t *lnewV; uint32_t *loldV; do { oldV = *num; newV = oldV + value; loldV = (uint32_t *)&oldV; lnewV = (uint32_t *)&newV; } while(!__sync_bool_compare_and_swap((uint32_t *)num, *(loldV), *(lnewV))); } void addAtomicDouble(double *num, double value) { double newV, oldV; uint64_t *lnewV; uint64_t *loldV; do { oldV = *num; newV = oldV + value; loldV = (uint64_t *)&oldV; lnewV = (uint64_t *)&newV; } while(!__sync_bool_compare_and_swap((uint64_t *)num, *(loldV), *(lnewV))); } void setAtomic(uint64_t *num, uint64_t value) { uint64_t newV, oldV; do { oldV = *num; newV = value; } while(!__sync_bool_compare_and_swap(num, oldV, newV)); } void addAtomicFixedPoint(uint64_t *num, uint64_t value) { uint64_t newV, oldV; do { oldV = *num; newV = oldV + value; } while(!__sync_bool_compare_and_swap(num, oldV, newV)); } void pageRankPrint(float *pageRankArray, uint32_t num_vertices) { uint32_t v; for(v = 0; v < num_vertices; v++) { printf("Rank[%d]=%f \n", v, pageRankArray[v]); } } // ******************************************************************************************** // *************** GRID DataStructure ************** // ******************************************************************************************** // function STREAMVERTICES(Fv,F) // Sum = 0 // for each vertex do // if F(vertex) then // Sum += Fv(edge) // end if // end for // return Sum // end function // function STREAMEDGES(Fe,F) // Sum = 0 // for each active block do >> block with active edges // for each edge ∈ block do // if F(edge.source) then // Sum += Fe(edge) // end if // end for // end for // return Sum // end function //we assume that the edges are not sorted in each partition struct PageRankStats *pageRankGraphGrid(struct Arguments *arguments, struct GraphGrid *graph) { struct PageRankStats *stats = NULL; switch (arguments->pushpull) { case 0: // push stats = pageRankPullRowGraphGrid(arguments, graph); break; case 1: // pull stats = pageRankPushColumnGraphGrid(arguments, graph); break; case 2: // pull stats = pageRankPullRowFixedPointGraphGrid(arguments, graph); break; case 3: // push stats = pageRankPushColumnFixedPointGraphGrid(arguments, graph); break; default:// pull stats = pageRankPullRowGraphGrid(arguments, graph); break; } return stats; } struct PageRankStats *pageRankPullRowGraphGrid(struct Arguments *arguments, struct GraphGrid *graph) { double error_total = 0.0; uint32_t v; uint32_t activeVertices = 0; // float init_pr = 
1.0f / (float)graph->num_vertices; uint32_t totalPartitions = graph->grid->num_partitions; struct PageRankStats *stats = newPageRankStatsGraphGrid(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Row (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->grid->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } // pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext); uint32_t i; // #pragma omp parallel for private(i) for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise { uint32_t j; #pragma omp parallel for private(j) for (j = 0; j < totalPartitions; ++j) { uint32_t k; uint32_t src; uint32_t dest; struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j]; for (k = 0; k < partition->num_edges; ++k) { src = partition->edgeList->edges_array_src[k]; dest = partition->edgeList->edges_array_dest[k]; // #pragma omp atomic update // __sync_fetch_and_add(&pageRanksNext[dest],riDividedOnDiClause[src]); // addAtomicFloat(float *num, float value) // #pragma omp atomic update pageRanksNext[dest] += riDividedOnDiClause[src]; } } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext, stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" 
-----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPullRowFixedPointGraphGrid(struct Arguments *arguments, struct GraphGrid *graph) { double error_total = 0.0; uint32_t v; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphGrid(graph); uint32_t totalPartitions = graph->grid->num_partitions; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Row FP (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->grid->out_degree[v]) riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]); else riDividedOnDiClause[v] = 0.0f; } // pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext); uint32_t i; // #pragma omp parallel for private(i) for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise { uint32_t j; #pragma omp parallel for private(j) for (j = 0; j < totalPartitions; ++j) { uint32_t k; uint32_t src; uint32_t dest; struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j]; for (k = 0; k < partition->num_edges; ++k) { src = partition->edgeList->edges_array_src[k]; dest = partition->edgeList->edges_array_dest[k]; // #pragma omp atomic update pageRanksNext[dest] += riDividedOnDiClause[src]; } } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; 
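    /* Post-iteration pass: normalize each rank by the vertex count and
       accumulate the total with an OpenMP reduction; the total is printed
       below as "PR Sum" as a sanity check on the rank distribution. */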
#pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } /******************************************************************/ struct PageRankStats *pageRankPushColumnGraphGrid(struct Arguments *arguments, struct GraphGrid *graph) { double error_total = 0.0; uint32_t v; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphGrid(graph); uint32_t totalPartitions = graph->grid->num_partitions; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Col (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->grid->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } // pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext); uint32_t j; #pragma omp parallel for private(j) for (j = 0; j < totalPartitions; ++j) { uint32_t i; // #pragma omp parallel for private(i) // iterate over partitions columnwise for (i = 0; i < totalPartitions; ++i) { uint32_t k; uint32_t src; uint32_t dest; struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j]; for (k = 0; k < partition->num_edges; ++k) { src = partition->edgeList->edges_array_src[k]; dest = partition->edgeList->edges_array_dest[k]; // #pragma omp atomic update pageRanksNext[dest] += riDividedOnDiClause[src]; // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]); } } } #pragma omp parallel for private(v) 
shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushColumnFixedPointGraphGrid(struct Arguments *arguments, struct GraphGrid *graph) { double error_total = 0.0; uint32_t v; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphGrid(graph); uint32_t totalPartitions = graph->grid->num_partitions; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Col FP (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->grid->out_degree[v]) riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]); else riDividedOnDiClause[v] = 0.0f; } // pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext); uint32_t j; #pragma omp parallel for 
private(j) for (j = 0; j < totalPartitions; ++j) // iterate over partitions columnwise { uint32_t i; for (i = 0; i < totalPartitions; ++i) { uint32_t k; uint32_t src; uint32_t dest; struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j]; for (k = 0; k < partition->num_edges; ++k) { src = partition->edgeList->edges_array_src[k]; dest = partition->edgeList->edges_array_dest[k]; // #pragma omp atomic update pageRanksNext[dest] += riDividedOnDiClause[src]; // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]); } } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } // ******************************************************************************************** // *************** CSR DataStructure ************** // ******************************************************************************************** struct PageRankStats *pageRankGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { struct PageRankStats *stats = NULL; switch (arguments->pushpull) { case 0: // pull stats = pageRankPullGraphCSR(arguments, graph); break; case 1: // push stats = pageRankPushGraphCSR(arguments, graph); break; case 2: // pull 64bit FP stats = pageRankPullFixedPoint64BitGraphCSR(arguments, graph); break; case 3: // push stats = pageRankPushFixedPointGraphCSR(arguments, graph); break; case 4: // pull 32bit Quant stats = pageRankPullQuant32BitGraphCSR(arguments, graph); break; case 5: // push stats = pageRankPushQuantGraphCSR(arguments, graph); break; case 6: // pull stats = pageRankDataDrivenPullGraphCSR(arguments, graph); break; case 7: // push stats = pageRankDataDrivenPushGraphCSR(arguments, graph); break; case 8: // pullpush stats = pageRankDataDrivenPullPushGraphCSR(arguments, graph); break; case 9: // pull 32bit FP stats = pageRankPullFixedPoint32BitGraphCSR(arguments, 
graph); break; case 10: // pull 16bit FP stats = pageRankPullFixedPoint16BitGraphCSR(arguments, graph); break; case 11: // pull 8bit FP stats = pageRankPullFixedPoint8BitGraphCSR(arguments, graph); break; case 12: // pull 16bit Quant stats = pageRankPullQuant16BitGraphCSR(arguments, graph); break; case 13: // pull 8bit Quant stats = pageRankPullQuant8BitGraphCSR(arguments, graph); break; // case 9: // push // pageRankDataDrivenPullFixedPointGraphCSR(arguments, graph); // break; // case 10: // pull // pageRankDataDrivenPushFixedPointGraphCSR(arguments, graph); // break; default:// pull stats = pageRankPullGraphCSR(arguments, graph); break; } return stats; } // topoligy driven approach struct PageRankStats *pageRankPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { float nodeIncomingPR = 0.0f; degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree; } pageRanksNext[v] = nodeIncomingPR; } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error 
/ graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; // uint32_t i; uint32_t v; // double error = 0; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { uint32_t degree = graph->vertices->out_degree[v]; uint32_t edge_idx = graph->vertices->edges_idx[v]; // uint32_t tid = omp_get_thread_num(); uint32_t j; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]); #pragma omp atomic update pageRanksNext[u] += riDividedOnDiClause[v]; } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + 
(stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } // topoligy driven approach struct PageRankStats *pageRankPullFixedPoint64BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; // uint64_t stats->base_pr_fp = FloatToFixed64(stats->base_pr); // uint64_t epsilon_fp = DoubleToFixed64(arguments->epsilon); // uint64_t num_vertices_fp = UInt32ToFixed64(); struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); // uint64_t* outDegreesFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); // uint64_t* pageRanksFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull FP_64 (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) 
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]); else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); pageRanksNext[v] += riDividedOnDiClause[u]; } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; // pageRanksFP[v] = FloatToFixed(nextPageRank); pageRanksNext[v] = 0; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(riDividedOnDiClause); free(pageRanksNext); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPullFixedPoint32BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; // uint64_t stats->base_pr_fp = FloatToFixed64(stats->base_pr); // uint64_t epsilon_fp = DoubleToFixed64(arguments->epsilon); // uint64_t num_vertices_fp = UInt32ToFixed64(); struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint32_t *riDividedOnDiClause = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t)); // uint64_t* outDegreesFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); // uint64_t* pageRanksFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull FP_32 
(tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = FloatToFixed32(stats->pageRanks[v] / graph->vertices->out_degree[v]); else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); pageRanksNext[v] += riDividedOnDiClause[u]; } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed32ToFloat(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; // pageRanksFP[v] = FloatToFixed(nextPageRank); pageRanksNext[v] = 0; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); free(riDividedOnDiClause); free(pageRanksNext); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPullFixedPoint16BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; // uint64_t stats->base_pr_fp = FloatToFixed64(stats->base_pr); // uint64_t epsilon_fp = DoubleToFixed64(arguments->epsilon); // uint64_t num_vertices_fp = UInt32ToFixed64(); struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if 
DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint16_t *riDividedOnDiClause = (uint16_t *) my_malloc(graph->num_vertices * sizeof(uint16_t)); // uint64_t* outDegreesFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); // uint64_t* pageRanksFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull FP_16 (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = FloatToFixed16(stats->pageRanks[v] / graph->vertices->out_degree[v]); else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); pageRanksNext[v] += riDividedOnDiClause[u]; } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed16ToFloat(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; // pageRanksFP[v] = FloatToFixed(nextPageRank); pageRanksNext[v] = 0; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(riDividedOnDiClause); free(pageRanksNext); stats->error_total = error_total; 
return stats; } struct PageRankStats *pageRankPullFixedPoint8BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; // uint64_t stats->base_pr_fp = FloatToFixed64(stats->base_pr); // uint64_t epsilon_fp = DoubleToFixed64(arguments->epsilon); // uint64_t num_vertices_fp = UInt32ToFixed64(); struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint8_t *riDividedOnDiClause = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); // uint64_t* outDegreesFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); // uint64_t* pageRanksFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull FP_8 (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = FloatToFixed8(stats->pageRanks[v] / graph->vertices->out_degree[v]); else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); pageRanksNext[v] += riDividedOnDiClause[u]; } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed8ToFloat(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; // pageRanksFP[v] = FloatToFixed(nextPageRank); pageRanksNext[v] = 0; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for 
reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); free(riDividedOnDiClause); free(pageRanksNext); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; // uint32_t i; uint32_t v; // double error = 0; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); // uint64_t stats->base_prFP = DoubleToFixed(stats->base_pr); // uint64_t stats->dampFP = DoubleToFixed(stats->damp); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); // uint32_t* pageRanksFP = (uint32_t*) my_malloc(graph->num_vertices*sizeof(uint32_t)); uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push FP (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { // pageRanksFP[v]=stats->base_prFP; pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) { riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]); // riDividedOnDiClause[v] = DIVFixed64V1(pageRanksFP[v],UInt64ToFixed(graph->vertices[v].out_degree)); } else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) schedule(dynamic, 1024) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { uint32_t degree = graph->vertices->out_degree[v]; uint32_t edge_idx = graph->vertices->edges_idx[v]; // uint32_t tid = omp_get_thread_num(); uint32_t j; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]); #pragma omp atomic update pageRanksNext[u] += riDividedOnDiClause[v]; } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { 
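        /* Convert the 64-bit fixed-point accumulator back to floating point,
           apply damping on top of the base rank, and count the vertex as
           active if its rank moved by at least epsilon. */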
float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; // pageRanksFP[v] = FloatToFixed(nextPageRank); pageRanksNext[v] = 0; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } //done by mohannad Ibranim //v_0: No need for next iteration's quantization parameters. (eqn 1) struct PageRankStats *pageRankPullQuant32BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { //QUANT_SCALE = 32; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; double error_total = 0.0; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); uint32_t *riDividedOnDiClause_quant = (uint32_t *)my_malloc(graph->num_vertices * sizeof(uint32_t)); printf(" -----------------------------------------------------\n"); printf("| %-30s %-19s| \n", "Starting Page Rank Pull Quant_32", "(tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } //1. 
Extract the quantization parameters from riDividedOnDiClause[] struct quant_params_32 rDivD_params; getMinMax_32(&rDivD_params, riDividedOnDiClause, graph->num_vertices); rDivD_params.scale = GetScale_32(rDivD_params.min, rDivD_params.max); rDivD_params.zero = 0; // printf("Iter %d quant parameters:\nMin = %.16f,\tMax = %.16f\nScale = %.24f,\tZero = %u\n", // stats->iterations,rDivD_params.min,rDivD_params.max,rDivD_params.scale,rDivD_params.zero); // printf(".........................................................\n"); //2. Quantize riDividedOnDiClause[] #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph) for(v = 0; v < graph->num_vertices; v++) { riDividedOnDiClause_quant[v] = quantize_32(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero); } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { uint64_t nodeIncomingPR = 0; degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); nodeIncomingPR += riDividedOnDiClause_quant[u]; } pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR; } //uint64_t temp_degree = 0; #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v]; stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs(nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; //temp_degree += vertices[v].in_degree; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); free(riDividedOnDiClause_quant); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPullQuant16BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { //QUANT_SCALE = 32; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; double error_total = 0.0; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = 
graph->sorted_edges_array->edges_array_dest; #endif float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); uint16_t *riDividedOnDiClause_quant = (uint16_t *)my_malloc(graph->num_vertices * sizeof(uint16_t)); printf(" -----------------------------------------------------\n"); printf("| %-30s %-19s| \n", "Starting Page Rank Pull Quant_16", "(tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } //1. Extract the quantization parameters from riDividedOnDiClause[] struct quant_params_16 rDivD_params; getMinMax_16(&rDivD_params, riDividedOnDiClause, graph->num_vertices); rDivD_params.scale = GetScale_16(rDivD_params.min, rDivD_params.max); rDivD_params.zero = 0; // printf("Iter %d quant parameters:\nMin = %.16f,\tMax = %.16f\nScale = %.24f,\tZero = %u\n", // stats->iterations,rDivD_params.min,rDivD_params.max,rDivD_params.scale,rDivD_params.zero); // printf(".........................................................\n"); //2. 
Quantize riDividedOnDiClause[] #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph) for(v = 0; v < graph->num_vertices; v++) { riDividedOnDiClause_quant[v] = quantize_16(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero); } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { uint64_t nodeIncomingPR = 0; degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); nodeIncomingPR += riDividedOnDiClause_quant[u]; } pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR; } //uint64_t temp_degree = 0; #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v]; stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs(nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; //temp_degree += vertices[v].in_degree; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); free(riDividedOnDiClause_quant); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPullQuant8BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { //QUANT_SCALE = 32; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t edge_idx; uint32_t activeVertices = 0; double error_total = 0.0; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); uint8_t *riDividedOnDiClause_quant = (uint8_t *)my_malloc(graph->num_vertices * sizeof(uint8_t)); printf(" -----------------------------------------------------\n"); printf("| %-30s %-19s| \n", "Starting Page Rank Pull Quant_8", "(tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); 
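// The quantization pass below uses the usual affine scheme with the zero-point
// pinned to 0, so dequantizing an edge sum reduces to scale * sum(q). A small
// worked example, assuming GetScale_8 computes (max - min) / 255 (the exact
// definition lives in the quantization helpers):
//
//   min = 0.0f, max = 0.5f       ->  scale ~= 0.00196f
//   quantize_8(0.25f, scale, 0)  ~= 128, and 128 * scale ~= 0.251f
//
// With a nonzero zero-point, the commented-out "nodeIncomingPR -= degree * zero"
// correction in the gather loop further down would become necessary.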
printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } //1. Extract the quantization parameters from riDividedOnDiClause[] struct quant_params_8 rDivD_params; getMinMax_8(&rDivD_params, riDividedOnDiClause, graph->num_vertices); rDivD_params.scale = GetScale_8(rDivD_params.min, rDivD_params.max); rDivD_params.zero = 0; // printf("Iter %d quant parameters:\nMin = %.16f,\tMax = %.16f\nScale = %.24f,\tZero = %u\n", // stats->iterations,rDivD_params.min,rDivD_params.max,rDivD_params.scale,rDivD_params.zero); // printf(".........................................................\n"); //2. Quantize riDividedOnDiClause[] #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph) for(v = 0; v < graph->num_vertices; v++) { riDividedOnDiClause_quant[v] = quantize_8(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero); } #pragma omp parallel for private(v,j,u,degree,edge_idx) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { uint64_t nodeIncomingPR = 0; degree = vertices->out_degree[v]; edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); nodeIncomingPR += riDividedOnDiClause_quant[u]; } //nodeIncomingPR -= (degree * rDivD_params.zero); pageRanksNext[v] = rDivD_params.scale * nodeIncomingPR; } //uint64_t temp_degree = 0; #pragma omp parallel for private(v) shared(arguments,pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v]; stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs(nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; //temp_degree += vertices[v].in_degree; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); free(timer); free(timer_inner); 
free(pageRanksNext); free(riDividedOnDiClause); free(riDividedOnDiClause_quant); stats->error_total = error_total; return stats; } //done by mohannad Ibranim struct PageRankStats *pageRankPushQuantGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { //QUANT_SCALE = 16; // uint32_t i; uint32_t v; uint32_t activeVertices = 0; double error_total = 0.0; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); uint32_t *riDividedOnDiClause_quant = (uint32_t *)my_malloc(graph->num_vertices * sizeof(uint32_t)); printf(" -----------------------------------------------------\n"); printf("| %-30s %-19s| \n", "Starting Page Rank Push Quant_32", "(tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } //1. Extract the quantization parameters from riDividedOnDiClause[] struct quant_params rDivD_params; getMinMax(&rDivD_params, riDividedOnDiClause, graph->num_vertices); rDivD_params.scale = GetScale(rDivD_params.min, rDivD_params.max); rDivD_params.zero = GetZeroPoint(rDivD_params.max, rDivD_params.scale); // printf("Itaration %d's quant parameters:\n\tMin = %f,\tMax = %f\nScale = %f,\tZero = %d\n......................", // stats->iterations,rDivD_params.min,rDivD_params.max,rDivD_params.scale,rDivD_params.zero); //2. 
Quantize riDividedOnDiClause[]
        #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,stats,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            riDividedOnDiClause_quant[v] = quantize(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);
        }

        #pragma omp parallel for default(none) private(v) shared(stats,rDivD_params,riDividedOnDiClause_quant,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)
        for(v = 0; v < graph->num_vertices; v++)
        {
            uint32_t degree = graph->vertices->out_degree[v];
            uint32_t edge_idx = graph->vertices->edges_idx[v];
            uint32_t j;
            for(j = edge_idx ; j < (edge_idx + degree) ; j++)
            {
                uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
                // dequantize on the fly: x ~= scale * (q - zero)
                #pragma omp atomic update
                pageRanksNext[u] += rDivD_params.scale * (riDividedOnDiClause_quant[v] - rDivD_params.zero);
            }
        }

        #pragma omp parallel for private(v) shared(arguments, stats,pageRanksNext) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0;
            double error = fabs(nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }

        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop

    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }

    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    free(riDividedOnDiClause_quant);
    stats->error_total = error_total;
    return stats;
}

struct PageRankStats *pageRankDataDrivenPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    double error_total = 0.0;
    uint32_t i;
    uint32_t v;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Vertex *vertices = NULL;
    uint32_t *sorted_edges_array = NULL;
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    uint8_t *workListCurr = NULL;
    uint8_t *workListNext = NULL;
    int activeVertices = 0;

    workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    resetWorkList(workListNext, graph->num_vertices);
    resetWorkList(workListCurr, graph->num_vertices);

#if DIRECTED
    vertices = graph->inverse_vertices;
    sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
    vertices = graph->vertices;
    sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif

    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));

    printf(" 
-----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for reduction(+:activeVertices) for(i = 0; i < graph->num_vertices; i++) { workListNext[i] = 1; activeVertices++; } swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices->out_degree[v]) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v]; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,sorted_edges_array,vertices,workListCurr,workListNext,stats,graph) private(v) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { uint32_t edge_idx; uint32_t degree; uint32_t j; uint32_t u; double error = 0; float nodeIncomingPR = 0; degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i)) } float oldPageRank = stats->pageRanks[v]; float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR); error = fabs(newPageRank - oldPageRank); error_total += error / graph->num_vertices; if(error >= arguments->epsilon) { stats->pageRanks[v] = newPageRank; degree = graph->vertices->out_degree[v]; edge_idx = graph->vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]); #pragma omp atomic write workListNext[u] = 1; // uint8_t old_val = workListNext[u]; // if(!old_val){ // __sync_bool_compare_and_swap(&workListNext[u], 0, 1); // } } activeVertices++; } } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" 
-----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t v; uint32_t edge_idx; uint32_t degree; uint32_t j; uint32_t u; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices) for(v = 0; v < graph->num_vertices; v++) { aResiduals[v] = 0.0; workListCurr[v] = 1; workListNext[v] = 0; activeVertices++; degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree edge_idx = vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(sorted_edges_array[j]); if(graph->vertices->out_degree[u]) aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i)) } aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v]; } Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { float oldPageRank = stats->pageRanks[v]; float newPageRank = aResiduals[v] + stats->pageRanks[v]; error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices); // #pragma omp atomic write stats->pageRanks[v] = newPageRank; 
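// Residual propagation: v folded its residual into its rank above and now
// pushes damp * residual / out_degree to every out-neighbour; a neighbour is
// scheduled exactly when that push lifts its residual across epsilon.
// Illustrative numbers only:
//
//   damp = 0.85f, aResiduals[v] = 0.02f, out_degree = 4
//   delta = 0.85f * (0.02f / 4) = 0.00425f   // added atomically per neighbour
//
// prevResidual is read without synchronisation, so the epsilon-crossing test
// is approximate under contention.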
degree = graph->vertices->out_degree[v]; float delta = stats->damp * (aResiduals[v] / degree); edge_idx = graph->vertices->edges_idx[v]; for(j = edge_idx ; j < (edge_idx + degree) ; j++) { u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]); float prevResidual = 0.0f; prevResidual = aResiduals[u]; #pragma omp atomic update aResiduals[u] += delta; if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon)) { activeVertices++; if(!workListNext[u]) { // #pragma omp atomic write workListNext[u] = 1; } } } aResiduals[v] = 0.0f; } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(aResiduals); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPullPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph) { double error_total = 0.0; uint32_t v; uint32_t edge_idx; uint32_t degree; uint32_t j; uint32_t u; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphCSR(graph); struct Vertex *vertices = NULL; uint32_t *sorted_edges_array = NULL; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); #if DIRECTED vertices = graph->inverse_vertices; sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest; #else vertices = graph->vertices; sorted_edges_array = graph->sorted_edges_array->edges_array_dest; #endif float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull-Push DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); 
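// Hybrid pull-push round: each active vertex recomputes its rank pull-style
// with a full gather over its in-neighbours (so the stored rank never depends
// on partially pushed residuals), while activation still flows push-style
// through aResiduals. The gather makes rank values self-correcting at the
// cost of touching every in-edge of an active vertex per round.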
Start(timer);
    Start(timer_inner);

    #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
    for(v = 0; v < graph->num_vertices; v++)
    {
        aResiduals[v] = 0.0f;
        workListCurr[v] = 1;
        workListNext[v] = 0;
        activeVertices++;

        degree = vertices->out_degree[v]; // for directed graphs this is the inverse graph, so out_degree here is really the in-degree
        edge_idx = vertices->edges_idx[v];
        for(j = edge_idx ; j < (edge_idx + degree) ; j++)
        {
            u = EXTRACT_VALUE(sorted_edges_array[j]);
            if(graph->vertices->out_degree[u])
                aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))
        }
        aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
    }
    Stop(timer_inner);
    printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));

    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        Start(timer_inner);
        error_total = 0;
        activeVertices = 0;

        #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,vertices,sorted_edges_array,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(workListCurr[v])
            {
                float nodeIncomingPR = 0.0f;
                degree = vertices->out_degree[v];
                edge_idx = vertices->edges_idx[v];
                for(j = edge_idx ; j < (edge_idx + degree) ; j++)
                {
                    u = EXTRACT_VALUE(sorted_edges_array[j]);
                    nodeIncomingPR += stats->pageRanks[u] / graph->vertices->out_degree[u];
                }
                float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
                float oldPageRank = stats->pageRanks[v];
                // float newPageRank = aResiduals[v]+pageRanks[v];
                error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);

                #pragma omp atomic write
                stats->pageRanks[v] = newPageRank;

                degree = graph->vertices->out_degree[v];
                float delta = stats->damp * (aResiduals[v] / degree);
                edge_idx = graph->vertices->edges_idx[v];
                for(j = edge_idx ; j < (edge_idx + degree) ; j++)
                {
                    u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
                    float prevResidual = 0.0f;
                    prevResidual = aResiduals[u];

                    // push the residual exactly once, atomically; adding delta
                    // again for a newly activated neighbour would double-count
                    // its contribution
                    #pragma omp atomic update
                    aResiduals[u] += delta;

                    if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
                    {
                        activeVertices++;
                        if(!workListNext[u])
                        {
                            workListNext[u] = 1;
                        }
                    }
                }
                aResiduals[v] = 0.0f;
            }
        }

        // activeVertices = getNumOfSetBits(workListNext);
        swapWorkLists(&workListNext, &workListCurr);
        resetWorkList(workListNext, graph->num_vertices);

        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop

    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }

    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    free(workListCurr);
    free(workListNext);
free(timer);
    free(timer_inner);
    free(aResiduals);
    free(riDividedOnDiClause);

    stats->error_total = error_total;
    return stats;
}

// float* pageRankDataDrivenPullFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR* graph){
// }

// float* pageRankDataDrivenPushFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR* graph){
// }

// float* pageRankDataDrivenPullPushFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR* graph){
// }

// ********************************************************************************************
// ***************          ArrayList DataStructure           **************
// ********************************************************************************************

struct PageRankStats *pageRankGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct PageRankStats *stats = NULL;

    switch (arguments->pushpull)
    {
    case 0: // pull
        stats = pageRankPullGraphAdjArrayList(arguments, graph);
        break;
    case 1: // push
        stats = pageRankPushGraphAdjArrayList(arguments, graph);
        break;
    case 2: // pull fixed-point
        stats = pageRankPullFixedPointGraphAdjArrayList(arguments, graph);
        break;
    case 3: // push fixed-point
        stats = pageRankPushFixedPointGraphAdjArrayList(arguments, graph);
        break;
    case 4: // data-driven pull
        stats = pageRankDataDrivenPullGraphAdjArrayList(arguments, graph);
        break;
    case 5: // data-driven push
        stats = pageRankDataDrivenPushGraphAdjArrayList(arguments, graph);
        break;
    case 6: // data-driven pull-push
        stats = pageRankDataDrivenPullPushGraphAdjArrayList(arguments, graph);
        break;
    default: // pull
        stats = pageRankPullGraphAdjArrayList(arguments, graph);
        break;
    }

    return stats;
}

struct PageRankStats *pageRankPullGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    double error_total = 0.0;
    uint32_t j;
    uint32_t v;
    uint32_t u;
    uint32_t degree;
    uint32_t activeVertices = 0;
    struct EdgeList *Nodes;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));

    float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");

    Start(timer);

    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }

    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);

        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices[v].out_degree)
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
            else
                riDividedOnDiClause[v] = 0.0f;
        }

        #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float nodeIncomingPR = 0.0f;
#if DIRECTED // will look at the other neighbours if directed
by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree; } pageRanksNext[v] = nodeIncomingPR; } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph) { double error_total = 0.0; uint32_t i; uint32_t v; // double error = 0; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); struct EdgeList *Nodes; omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t)); #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock) for (i = 0; i < graph->num_vertices; i++) { omp_init_lock(&(vertex_lock[i])); } float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" 
-----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { Nodes = graph->vertices[v].outNodes; uint32_t degree = graph->vertices[v].out_degree; // uint32_t tid = omp_get_thread_num(); uint32_t j; for(j = 0 ; j < (degree) ; j++) { uint32_t u = Nodes->edges_array_dest[j]; // omp_set_lock(&(vertex_lock[u])); // pageRanksNext[u] += riDividedOnDiClause[v]; // omp_unset_lock((&vertex_lock[u])); #pragma omp atomic update pageRanksNext[u] += riDividedOnDiClause[v]; // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED); // printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u ); // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]); } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); #pragma omp parallel for for (i = 0; i < graph->num_vertices; i++) { omp_destroy_lock(&(vertex_lock[i])); } free(timer); free(timer_inner); free(vertex_lock); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPullFixedPointGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t activeVertices = 0; struct EdgeList *Nodes; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer 
*timer_inner = (struct Timer *) malloc(sizeof(struct Timer));

    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull FP (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");

    Start(timer);

    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }

    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);

        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices[v].out_degree)
                riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);
            else
                riDividedOnDiClause[v] = 0;
        }

        #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
        for(v = 0; v < graph->num_vertices; v++)
        {
            // accumulate in 64-bit fixed point; a float accumulator would drop
            // the low-order fractional bits of the Fixed64 values
            uint64_t nodeIncomingPR = 0;
#if DIRECTED // will look at the other neighbours if directed by using inverse edge list
            Nodes = graph->vertices[v].inNodes;
            degree = graph->vertices[v].in_degree;
#else
            Nodes = graph->vertices[v].outNodes;
            degree = graph->vertices[v].out_degree;
#endif
            for(j = 0 ; j < (degree) ; j++)
            {
                u = Nodes->edges_array_dest[j];
                nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;
            }
            pageRanksNext[v] = nodeIncomingPR;
        }

        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0;
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }

        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop

    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }

    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // printf(" -----------------------------------------------------\n");
    // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
    // printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks,
graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushFixedPointGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph) { double error_total = 0.0; uint32_t i; uint32_t v; // double error = 0; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); struct EdgeList *Nodes; omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t)); #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock) for (i = 0; i < graph->num_vertices; i++) { omp_init_lock(&(vertex_lock[i])); } uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push FP (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree); else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { Nodes = graph->vertices[v].outNodes; uint32_t degree = graph->vertices[v].out_degree; // uint32_t tid = omp_get_thread_num(); uint32_t j; for(j = 0 ; j < (degree) ; j++) { uint32_t u = Nodes->edges_array_dest[j]; // omp_set_lock(&(vertex_lock[u])); // pageRanksNext[u] += riDividedOnDiClause[v]; // omp_unset_lock((&vertex_lock[u])); #pragma omp atomic update pageRanksNext[u] += riDividedOnDiClause[v]; // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED); // printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u ); // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]); } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | 
%-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); #pragma omp parallel for for (i = 0; i < graph->num_vertices; i++) { omp_destroy_lock(&(vertex_lock[i])); } free(timer); free(timer_inner); free(vertex_lock); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPullGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph) { double error_total = 0.0; uint32_t i; uint32_t v; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; struct EdgeList *Nodes; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for reduction(+:activeVertices) for(i = 0; i < graph->num_vertices; i++) { workListNext[i] = 1; activeVertices++; } swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,workListCurr,workListNext,stats,graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { uint32_t degree; uint32_t j; uint32_t u; 
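// Data-driven pull on the adjacency-list layout: only worklisted vertices
// re-gather from their neighbour EdgeList, and a vertex whose rank moves by
// epsilon or more schedules all of its out-neighbours, whose pull sums are now
// stale. The algorithm matches the CSR variant; only the neighbour iteration
// (Nodes->edges_array_dest[j] instead of a CSR offset range) differs.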
double error = 0; float nodeIncomingPR = 0; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i)) } float oldPageRank = stats->pageRanks[v]; float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR); error = fabs(newPageRank - oldPageRank); error_total += error / graph->num_vertices; if(error >= arguments->epsilon) { stats->pageRanks[v] = newPageRank; Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; #pragma omp atomic write workListNext[u] = 1; // uint8_t old_val = workListNext[u]; // if(!old_val){ // __sync_bool_compare_and_swap(&workListNext[u], 0, 1); // } } activeVertices++; } } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph) { double error_total = 0.0; uint32_t v; uint32_t degree; uint32_t j; uint32_t u; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; struct EdgeList *Nodes; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); 
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices) for(v = 0; v < graph->num_vertices; v++) { aResiduals[v] = 0.0; workListCurr[v] = 1; workListNext[v] = 0; activeVertices++; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; if(graph->vertices[u].out_degree) aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i)) } aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v]; } Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { float oldPageRank = stats->pageRanks[v]; float newPageRank = aResiduals[v] + stats->pageRanks[v]; error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices); // #pragma omp atomic write stats->pageRanks[v] = newPageRank; Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; float delta = stats->damp * (aResiduals[v] / degree); for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; float prevResidual = 0.0f; prevResidual = aResiduals[u]; #pragma omp atomic update aResiduals[u] += delta; if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon)) { activeVertices++; if(!workListNext[u]) { // #pragma omp atomic write workListNext[u] = 1; } } } aResiduals[v] = 0.0f; } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(aResiduals); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPullPushGraphAdjArrayList(struct 
Arguments *arguments, struct GraphAdjArrayList *graph) { double error_total = 0.0; uint32_t v; uint32_t degree; uint32_t j; uint32_t u; struct EdgeList *Nodes; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull-Push DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices) for(v = 0; v < graph->num_vertices; v++) { aResiduals[v] = 0.0f; workListCurr[v] = 1; workListNext[v] = 0; activeVertices++; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; if(graph->vertices[u].out_degree) aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i)) } aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v]; } Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { float nodeIncomingPR = 0.0f; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->edges_array_dest[j]; nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree; } float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR); float oldPageRank = stats->pageRanks[v]; // float newPageRank = aResiduals[v]+pageRanks[v]; error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices); #pragma omp atomic write stats->pageRanks[v] = newPageRank; Nodes = graph->vertices[v].outNodes; degree = 
graph->vertices[v].out_degree; float delta = stats->damp * (aResiduals[v] / degree); for(j = 0 ; j < (degree) ; j++) { uint32_t u = Nodes->edges_array_dest[j]; float prevResidual = 0.0f; prevResidual = aResiduals[u]; #pragma omp atomic update aResiduals[u] += delta; if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon)) { activeVertices++; if(!workListNext[u]) { workListNext[u] = 1; } } } aResiduals[v] = 0.0f; } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(aResiduals); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } // ******************************************************************************************** // *************** LinkedList DataStructure ************** // ******************************************************************************************** struct PageRankStats *pageRankGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { struct PageRankStats *stats = NULL; switch (arguments->pushpull) { case 0: // pull stats = pageRankPullGraphAdjLinkedList(arguments, graph); break; case 1: // push stats = pageRankPushGraphAdjLinkedList(arguments, graph); break; case 2: // pull stats = pageRankPullFixedPointGraphAdjLinkedList(arguments, graph); break; case 3: // push stats = pageRankPushFixedPointGraphAdjLinkedList(arguments, graph); break; case 4: // pull stats = pageRankDataDrivenPullGraphAdjLinkedList(arguments, graph); break; case 5: // push stats = pageRankDataDrivenPushGraphAdjLinkedList(arguments, graph); break; case 6: // pullpush stats = pageRankDataDrivenPullPushGraphAdjLinkedList(arguments, graph); break; default:// push stats = pageRankPullGraphAdjLinkedList(arguments, graph); break; } return stats; } struct PageRankStats *pageRankPullGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { double error_total = 0.0; uint32_t j; uint32_t v; uint32_t u; uint32_t degree; uint32_t activeVertices = 0; struct AdjLinkedListNode *Nodes; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" 
-----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { error_total = 0; activeVertices = 0; Start(timer_inner); #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { float nodeIncomingPR = 0.0f; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree; } pageRanksNext[v] = nodeIncomingPR; } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { double error_total = 0.0; 
uint32_t i; uint32_t v; // double error = 0; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); struct AdjLinkedListNode *Nodes; omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t)); #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock) for (i = 0; i < graph->num_vertices; i++) { omp_init_lock(&(vertex_lock[i])); } float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { Nodes = graph->vertices[v].outNodes; uint32_t degree = graph->vertices[v].out_degree; // uint32_t tid = omp_get_thread_num(); uint32_t j; for(j = 0 ; j < (degree) ; j++) { uint32_t u = Nodes->dest; Nodes = Nodes->next; // omp_set_lock(&(vertex_lock[u])); // pageRanksNext[u] += riDividedOnDiClause[v]; // omp_unset_lock((&vertex_lock[u])); #pragma omp atomic update pageRanksNext[u] += riDividedOnDiClause[v]; // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED); // printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u ); // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]); } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } 
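// final pass: normalize the ranks by |V|; the summary row below reports
// their sum as a sanity check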
Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);

    #pragma omp parallel for
    for (i = 0; i < graph->num_vertices; i++)
    {
        omp_destroy_lock(&(vertex_lock[i]));
    }

    free(timer);
    free(timer_inner);
    free(vertex_lock);
    free(pageRanksNext);
    free(riDividedOnDiClause);

    stats->error_total = error_total;
    return stats;
}

struct PageRankStats *pageRankPullFixedPointGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    double error_total = 0.0;
    uint32_t j;
    uint32_t v;
    uint32_t u;
    uint32_t degree;
    uint32_t activeVertices = 0;
    struct AdjLinkedListNode *Nodes;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));

    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull FP (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");

    Start(timer);

    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }

    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);

        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices[v].out_degree)
                riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);
            else
                riDividedOnDiClause[v] = 0;
        }

        #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
        for(v = 0; v < graph->num_vertices; v++)
        {
            // accumulate in the 64-bit fixed-point domain; a float
            // accumulator would silently drop the low-order fraction bits
            uint64_t nodeIncomingPR = 0;
#if DIRECTED // will look at the other neighbours if directed by using inverse edge list
            Nodes = graph->vertices[v].inNodes;
            degree = graph->vertices[v].in_degree;
#else
            Nodes = graph->vertices[v].outNodes;
            degree = graph->vertices[v].out_degree;
#endif
            for(j = 0 ; j < (degree) ; j++)
            {
                u = Nodes->dest;
                Nodes = Nodes->next;
                nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;
            }
            pageRanksNext[v] = nodeIncomingPR;
        }

        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0;
            double error = fabs(nextPageRank -
prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // printf(" -----------------------------------------------------\n"); // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total); // printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(timer); free(timer_inner); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankPushFixedPointGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { double error_total = 0.0; uint32_t i; uint32_t v; // double error = 0; uint32_t activeVertices = 0; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); struct AdjLinkedListNode *Nodes; omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t)); #pragma omp parallel for default(none) private(i) shared(graph,vertex_lock) for (i = 0; i < graph->num_vertices; i++) { omp_init_lock(&(vertex_lock[i])); } uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push FP (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph) for(v = 0; v < graph->num_vertices; v++) { pageRanksNext[v] = 0.0f; } for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph) for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree); else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) 
schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { Nodes = graph->vertices[v].outNodes; uint32_t degree = graph->vertices[v].out_degree; // uint32_t tid = omp_get_thread_num(); uint32_t j; for(j = 0 ; j < (degree) ; j++) { uint32_t u = Nodes->dest; Nodes = Nodes->next; // omp_set_lock(&(vertex_lock[u])); // pageRanksNext[u] += riDividedOnDiClause[v]; // omp_unset_lock((&vertex_lock[u])); #pragma omp atomic update pageRanksNext[u] += riDividedOnDiClause[v]; // __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED); // printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u ); // addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]); } } #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices) for(v = 0; v < graph->num_vertices; v++) { float prevPageRank = stats->pageRanks[v]; float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v])); stats->pageRanks[v] = nextPageRank; pageRanksNext[v] = 0.0f; double error = fabs( nextPageRank - prevPageRank); error_total += (error / graph->num_vertices); if(error >= arguments->epsilon) { activeVertices++; } } Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); #pragma omp parallel for for (i = 0; i < graph->num_vertices; i++) { omp_destroy_lock(&(vertex_lock[i])); } free(timer); free(timer_inner); free(vertex_lock); free(pageRanksNext); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPullGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { double error_total = 0.0; uint32_t i; uint32_t v; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; struct AdjLinkedListNode *Nodes; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" 
-----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for reduction(+:activeVertices) for(i = 0; i < graph->num_vertices; i++) { workListNext[i] = 1; activeVertices++; } swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for for(v = 0; v < graph->num_vertices; v++) { if(graph->vertices[v].out_degree) riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree; else riDividedOnDiClause[v] = 0.0f; } #pragma omp parallel for default(none) shared(arguments, riDividedOnDiClause, workListCurr, workListNext, stats, graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { uint32_t degree; uint32_t j; uint32_t u; double error = 0; float nodeIncomingPR = 0; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i)) } float oldPageRank = stats->pageRanks[v]; float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR); error = fabs(newPageRank - oldPageRank); error_total += error / graph->num_vertices; if(error >= arguments->epsilon) { stats->pageRanks[v] = newPageRank; Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; #pragma omp atomic write workListNext[u] = 1; // uint8_t old_val = workListNext[u]; // if(!old_val){ // __sync_bool_compare_and_swap(&workListNext[u], 0, 1); // } } activeVertices++; } } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats 
*pageRankDataDrivenPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { double error_total = 0.0; uint32_t v; uint32_t degree; uint32_t j; uint32_t u; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; struct AdjLinkedListNode *Nodes; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Push DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices) for(v = 0; v < graph->num_vertices; v++) { aResiduals[v] = 0.0; workListCurr[v] = 1; workListNext[v] = 0; activeVertices++; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; if(graph->vertices[u].out_degree) aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i)) } aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v]; } Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { float oldPageRank = stats->pageRanks[v]; float newPageRank = aResiduals[v] + stats->pageRanks[v]; error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices); // #pragma omp atomic write stats->pageRanks[v] = newPageRank; Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; float delta = stats->damp * (aResiduals[v] / degree); for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; float prevResidual = 0.0f; prevResidual = aResiduals[u]; #pragma omp atomic update aResiduals[u] += delta; if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon)) { activeVertices++; if(!workListNext[u]) { // #pragma omp atomic write 
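// a plain byte store is enough here: every racing thread writes the same
// value 1, which is why the atomic write above is left commented out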
workListNext[u] = 1; } } } aResiduals[v] = 0.0f; } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(aResiduals); free(riDividedOnDiClause); stats->error_total = error_total; return stats; } struct PageRankStats *pageRankDataDrivenPullPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph) { double error_total = 0.0; uint32_t v; uint32_t degree; uint32_t j; uint32_t u; struct AdjLinkedListNode *Nodes; // float init_pr = 1.0f / (float)graph->num_vertices; struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer)); uint8_t *workListCurr = NULL; uint8_t *workListNext = NULL; int activeVertices = 0; workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t)); resetWorkList(workListNext, graph->num_vertices); resetWorkList(workListCurr, graph->num_vertices); float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float)); float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Page Rank Pull-Push DD (tolerance/epsilon)"); printf(" -----------------------------------------------------\n"); printf("| %-51.13lf | \n", arguments->epsilon); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); Start(timer); Start(timer_inner); #pragma omp parallel for private(Nodes,degree,v,j,u) shared(stats,workListCurr,workListNext,aResiduals) reduction(+:activeVertices) for(v = 0; v < graph->num_vertices; v++) { aResiduals[v] = 0.0f; workListCurr[v] = 1; workListNext[v] = 0; activeVertices++; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; if(graph->vertices[u].out_degree) aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i)) } aResiduals[v] = (1.0f - stats->damp) * stats->damp * 
aResiduals[v]; } Stop(timer_inner); printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner)); for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++) { Start(timer_inner); error_total = 0; activeVertices = 0; #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) for(v = 0; v < graph->num_vertices; v++) { if(workListCurr[v]) { float nodeIncomingPR = 0.0f; #if DIRECTED // will look at the other neighbours if directed by using inverese edge list Nodes = graph->vertices[v].inNodes; degree = graph->vertices[v].in_degree; #else Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; #endif for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree; } float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR); float oldPageRank = stats->pageRanks[v]; // float newPageRank = aResiduals[v]+pageRanks[v]; error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices); #pragma omp atomic write stats->pageRanks[v] = newPageRank; Nodes = graph->vertices[v].outNodes; degree = graph->vertices[v].out_degree; float delta = stats->damp * (aResiduals[v] / degree); for(j = 0 ; j < (degree) ; j++) { u = Nodes->dest; Nodes = Nodes->next; float prevResidual = 0.0f; prevResidual = aResiduals[u]; #pragma omp atomic update aResiduals[u] += delta; if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon)) { activeVertices++; if(!workListNext[u]) { workListNext[u] = 1; } } } aResiduals[v] = 0.0f; } } // activeVertices = getNumOfSetBits(workListNext); swapWorkLists(&workListNext, &workListCurr); resetWorkList(workListNext, graph->num_vertices); Stop(timer_inner); printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner)); if(activeVertices == 0) break; }// end iteration loop double sum = 0.0f; #pragma omp parallel for reduction(+:sum) for(v = 0; v < graph->num_vertices; v++) { stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices; sum += stats->pageRanks[v]; } Stop(timer); stats->time_total = Seconds(timer); printf(" -----------------------------------------------------\n"); printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)"); printf(" -----------------------------------------------------\n"); printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total); printf(" -----------------------------------------------------\n"); // pageRankPrint(pageRanks, graph->num_vertices); free(workListCurr); free(workListNext); free(timer); free(timer_inner); free(aResiduals); free(riDividedOnDiClause); stats->error_total = error_total; return stats; }
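// Note on helpers used above: the fixed-point variants call DoubleToFixed64()
// and Fixed64ToDouble(), defined elsewhere in the library; summing in an
// integer domain keeps the parallel accumulation order-independent, which
// float addition is not. A minimal sketch of such helpers, assuming a Q32.32
// layout (32 integer bits, 32 fractional bits) -- the library's actual bit
// split may differ:
#include <stdint.h>
#include <string.h>

#define FIXED64_FRACTIONAL_BITS 32

static inline uint64_t DoubleToFixed64Sketch(double x)
{
    // scale by 2^32 and truncate; the callers above only convert
    // non-negative rank contributions
    return (uint64_t)(x * (double)((uint64_t)1 << FIXED64_FRACTIONAL_BITS));
}

static inline double Fixed64ToDoubleSketch(uint64_t x)
{
    return (double)x / (double)((uint64_t)1 << FIXED64_FRACTIONAL_BITS);
}

// The commented-out addAtomicFloat() calls in the push kernels hint at a
// CAS-based alternative to "#pragma omp atomic update" for float sums; a
// typical sketch using GCC/Clang builtins (the name and signature are
// assumptions, not the library's actual API):
static inline void addAtomicFloatSketch(float *addr, float value)
{
    uint32_t *bits = (uint32_t *)addr; // reinterpret the float's storage
    uint32_t old_bits;
    uint32_t new_bits;
    float old_val;
    float new_val;

    do
    {
        old_bits = __atomic_load_n(bits, __ATOMIC_RELAXED);
        memcpy(&old_val, &old_bits, sizeof(float));
        new_val = old_val + value;
        memcpy(&new_bits, &new_val, sizeof(float));
        // retry if another thread updated *addr between the load and the CAS
    } while (!__atomic_compare_exchange_n(bits, &old_bits, new_bits, 0,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}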
activation_functions.h
#ifndef ANAKIN_SABER_FUNCS_IMPL_X86_MATH_ACTIVATION_FUNCTIONS_H
#define ANAKIN_SABER_FUNCS_IMPL_X86_MATH_ACTIVATION_FUNCTIONS_H

#include <math.h>
#include <string>
#include "saber/saber_types.h"
#include "utils/logger/logger.h"
#include "saber/funcs/impl/x86/saber_avx2_math.h"

namespace anakin {
namespace saber {
namespace math {

template <typename T>
void sigmoid(size_t len, T *x, T *y) {
    for (size_t i = 0; i < len; i++) {
        y[i] = 1. / (1. + exp(-x[i]));
    }
}

template <typename T>
void parallel_sigmoid(size_t len, T *x, T *y) {
#pragma omp parallel for
    for (size_t i = 0; i < len; i++) {
        y[i] = 1. / (1. + exp(-x[i]));
    }
}

template <typename T>
void relu(size_t len, T *x, T *y) {
    for (size_t i = 0; i < len; i++) {
        y[i] = x[i] < 0 ? 0 : x[i];
    }
}

template <typename T>
void parallel_relu(size_t len, T *x, T *y) {
#pragma omp parallel for
    for (size_t i = 0; i < len; i++) {
        y[i] = x[i] < 0 ? 0 : x[i];
    }
}

template <typename T>
void tanh(size_t len, T *x, T *y) {
    for (size_t i = 0; i < len; i++) {
        T e_x = exp(2 * x[i]);
        y[i] = (e_x - 1) / (e_x + 1);
    }
}

template <typename T>
void parallel_tanh(size_t len, T *x, T *y) {
#pragma omp parallel for
    for (size_t i = 0; i < len; i++) {
        T e_x = exp(2 * x[i]);
        y[i] = (e_x - 1) / (e_x + 1);
    }
}

template <typename T>
void stanh(size_t len, T *x, T *y) {
    for (size_t i = 0; i < len; i++) {
        T e_x = exp(4. * x[i] / 3.);
        y[i] = 1.7159 * (e_x - 1) / (e_x + 1);
    }
}

template <typename T>
void parallel_stanh(size_t len, T *x, T *y) {
#pragma omp parallel for
    for (size_t i = 0; i < len; i++) {
        T e_x = exp(4. * x[i] / 3.);
        y[i] = 1.7159 * (e_x - 1) / (e_x + 1);
    }
}

template <typename T>
void identity(size_t len, T *x, T *y) {
    for (size_t i = 0; i < len; i++) {
        y[i] = x[i];
    }
}

template <typename T>
void parallel_identity(size_t len, T *x, T *y) {
#pragma omp parallel for
    for (size_t i = 0; i < len; i++) {
        y[i] = x[i];
    }
}

template <typename T>
struct Active {
    typedef void (*Act)(size_t, T*, T*);
    typedef T (*Act_m256)(T);
};

static Active<float>::Act k_act_float[] = {
    nullptr,
    &sigmoid<float>,
    &relu<float>,
    &tanh<float>,
    nullptr,
    nullptr,
    &identity<float>,
    &sigmoid<float>,
    &tanh<float>,
    &stanh<float>
};

static Active<float>::Act k_parallel_act_float[] = {
    nullptr,
    &parallel_sigmoid<float>,
    &parallel_relu<float>,
    &parallel_tanh<float>,
    nullptr,
    nullptr,
    &parallel_identity<float>,
    &parallel_sigmoid<float>,
    &parallel_tanh<float>,
    &parallel_stanh<float>
};

inline void activation(size_t len, float *src, float *dst, int index) {
    auto *func = k_act_float[index];
    if (!func) {
        LOG(ERROR) << "activation not implemented!";
        return; // calling through a null table entry would crash
    }
    func(len, src, dst);
}

inline void parallel_activation(size_t len, float *src, float *dst, int index) {
    auto *func = k_parallel_act_float[index];
    if (!func) {
        LOG(ERROR) << "activation not implemented!";
        return; // calling through a null table entry would crash
    }
    func(len, src, dst);
}

#ifdef __AVX__
inline __m256 Exp(__m256 a) {
    return exp256_ps(a);
}

inline __m256 Relu(const __m256 a) {
    __m256 tmp = _mm256_set1_ps(0.0f);
    return _mm256_max_ps(a, tmp);
}

inline __m256 Sigmoid(const __m256 a) {
    __m256 tmp = _mm256_sub_ps(_mm256_set1_ps(0.0f), a);
    tmp = Exp(tmp);
    tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp);
    tmp = _mm256_div_ps(_mm256_set1_ps(1.0f), tmp);
    return tmp;
}

inline __m256 Tanh(const __m256 a) {
    __m256 tmp = _mm256_mul_ps(_mm256_set1_ps(-2.0f), a);
    tmp = Exp(tmp);
    return _mm256_sub_ps(_mm256_div_ps(_mm256_set1_ps(2.0f),
                                       _mm256_add_ps(_mm256_set1_ps(1.0f), tmp)),
                         _mm256_set1_ps(1.0f));
}

inline __m256 Identity(const __m256 a) {
    return a;
}

static Active<__m256>::Act_m256 k_act_avx[] = {
    nullptr,
    &Sigmoid,
&Relu, &Tanh, nullptr, nullptr, &Identity, &Sigmoid, &Tanh, nullptr }; inline __m256 avx_activation(__m256 a, int index) { return k_act_avx[index](a); } #endif } // namespace math } // namespace saber } // namespace anakin #endif //ANAKIN_SABER_FUNCS_IMPL_X86_MATH_ACTIVATION_FUNCTIONS_H
uniform_grid_environment.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_ #define CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_ #include <assert.h> #include <omp.h> #include <algorithm> #include <array> #include <atomic> #include <cmath> #include <iostream> #include <limits> #include <memory> #include <mutex> #ifdef LINUX #include <parallel/algorithm> #endif // LINUX #include <utility> #include <vector> #include <morton/morton.h> // NOLINT #include "core/container/agent_vector.h" #include "core/container/fixed_size_vector.h" #include "core/container/inline_vector.h" #include "core/container/math_array.h" #include "core/container/parallel_resize_vector.h" #include "core/environment/environment.h" #include "core/functor.h" #include "core/param/param.h" #include "core/resource_manager.h" #include "core/util/log.h" #include "core/util/spinlock.h" namespace bdm { namespace detail { struct InitializeGPUData; } // namespace detail /// A class that represents Cartesian 3D grid class UniformGridEnvironment : public Environment { // MechanicalForcesOpCuda needs access to some UniformGridEnvironment private // members to reconstruct // the grid on GPU (same for MechanicalForcesOpOpenCL) friend struct MechanicalForcesOpCuda; friend struct ::bdm::detail::InitializeGPUData; friend struct MechanicalForcesOpOpenCL; friend class SchedulerTest; public: /// A single unit cube of the grid struct Box { Spinlock lock_; // std::atomic<bool> timestamp_; uint32_t timestamp_; /// start value of the linked list of agents inside this box. /// Next element can be found at `successors_[start_]` AgentHandle start_; /// length of the linked list (i.e. 
number of agents) /// uint64_t, because sizeof(Box) = 16, for uint16_t and uint64_t uint16_t length_; Box() : timestamp_(0), start_(AgentHandle()), length_(0) {} /// Copy Constructor required for boxes_.resize() /// Since box values will be overwritten afterwards it forwards to the /// default ctor Box(const Box& other) : Box() {} Box& operator=(const Box& other) { // start_ = other.start_.load(std::memory_order_relaxed); // length_ = other.length_.load(std::memory_order_relaxed); start_ = other.start_; length_ = other.length_; return *this; } bool IsEmpty(uint64_t grid_timestamp) const { return grid_timestamp != timestamp_; } /// @brief Adds an agent to this box /// /// @param[in] agent The object's identifier /// @param AddObject successors The successors void AddObject(AgentHandle ah, AgentVector<AgentHandle>* successors, UniformGridEnvironment* grid) { std::lock_guard<Spinlock> lock_guard(lock_); if (timestamp_ != grid->timestamp_) { timestamp_ = grid->timestamp_; length_ = 1; start_ = ah; } else { length_++; (*successors)[ah] = start_; start_ = ah; } } /// An iterator that iterates over the cells in this box struct Iterator { Iterator(UniformGridEnvironment* grid, const Box* box) : grid_(grid), current_value_(box->start_), countdown_(box->length_) { if (grid->timestamp_ != box->timestamp_) { countdown_ = 0; } } bool IsAtEnd() { return countdown_ <= 0; } Iterator& operator++() { countdown_--; if (countdown_ > 0) { current_value_ = grid_->successors_[current_value_]; } return *this; } AgentHandle operator*() const { return current_value_; } /// Pointer to the neighbor grid; for accessing the successor_ list UniformGridEnvironment* grid_; /// The current agent to be considered AgentHandle current_value_; /// The remain number of agents to consider int countdown_ = 0; }; Iterator begin() const { // NOLINT auto* grid = static_cast<UniformGridEnvironment*>( Simulation::GetActive()->GetEnvironment()); return Iterator(grid, this); } }; /// An iterator that iterates over the boxes in this grid struct NeighborIterator { explicit NeighborIterator( const FixedSizeVector<const Box*, 27>& neighbor_boxes, uint64_t grid_timestamp) : neighbor_boxes_(neighbor_boxes), // start iterator from box 0 box_iterator_(neighbor_boxes_[0]->begin()), grid_timestamp_(grid_timestamp) { // if first box is empty if (neighbor_boxes_[0]->IsEmpty(grid_timestamp)) { ForwardToNonEmptyBox(grid_timestamp); } } bool IsAtEnd() const { return is_end_; } AgentHandle operator*() const { return *box_iterator_; } /// Version where empty neighbor boxes are allowed NeighborIterator& operator++() { ++box_iterator_; // if iterator of current box has come to an end, continue with next box if (box_iterator_.IsAtEnd()) { return ForwardToNonEmptyBox(grid_timestamp_); } return *this; } private: /// The 27 neighbor boxes that will be searched for agents const FixedSizeVector<const Box*, 27>& neighbor_boxes_; /// The box that shall be considered to iterate over for finding simulation /// objects typename Box::Iterator box_iterator_; uint64_t grid_timestamp_; /// The id of the box to be considered (i.e. 
value between 0 - 26) uint16_t box_idx_ = 0; /// Flag to indicate that all the neighbor boxes have been searched through bool is_end_ = false; /// Forwards the iterator to the next non-empty box and returns itself /// If there are no non-empty boxes is_end_ is set to true NeighborIterator& ForwardToNonEmptyBox(uint64_t grid_timestamp) { // increment box id until a non-empty box has been found while (++box_idx_ < neighbor_boxes_.size()) { // box is empty or uninitialized (padding box) -> continue if (neighbor_boxes_[box_idx_]->IsEmpty(grid_timestamp)) { continue; } // a non-empty box has been found box_iterator_ = neighbor_boxes_[box_idx_]->begin(); return *this; } // all remaining boxes have been empty; reached end is_end_ = true; return *this; } }; /// Enum that determines the degree of adjacency when searching neighbor boxes // todo(ahmad): currently only kHigh is supported (hardcoded 26 in several // places) enum Adjacency { kLow, /**< The closest 8 neighboring boxes */ kMedium, /**< The closest 18 neighboring boxes */ kHigh /**< The closest 26 neighboring boxes */ }; explicit UniformGridEnvironment(Adjacency adjacency = kHigh) : adjacency_(adjacency) {} UniformGridEnvironment(UniformGridEnvironment const&) = delete; void operator=(UniformGridEnvironment const&) = delete; virtual ~UniformGridEnvironment() {} /// Clears the grid void Clear() override { box_length_ = 1; largest_object_size_ = 0; num_boxes_axis_ = {{0}}; num_boxes_xy_ = 0; int32_t inf = std::numeric_limits<int32_t>::max(); grid_dimensions_ = {inf, -inf, inf, -inf, inf, -inf}; threshold_dimensions_ = {inf, -inf}; successors_.clear(); has_grown_ = false; } struct AssignToBoxesFunctor : public Functor<void, Agent*, AgentHandle> { explicit AssignToBoxesFunctor(UniformGridEnvironment* grid) : grid_(grid) {} void operator()(Agent* agent, AgentHandle ah) override { const auto& position = agent->GetPosition(); auto idx = grid_->GetBoxIndex(position); auto box = grid_->GetBoxPointer(idx); box->AddObject(ah, &(grid_->successors_), grid_); agent->SetBoxIdx(idx); } private: UniformGridEnvironment* grid_ = nullptr; }; /// Updates the grid, as agents may have moved, been added, or been deleted void Update() override { auto* rm = Simulation::GetActive()->GetResourceManager(); if (rm->GetNumAgents() != 0) { Clear(); timestamp_++; auto inf = Math::kInfinity; std::array<double, 6> tmp_dim = {{inf, -inf, inf, -inf, inf, -inf}}; CalcSimDimensionsAndLargestAgent(&tmp_dim, &largest_object_size_); RoundOffGridDimensions(tmp_dim); auto los = ceil(largest_object_size_); assert(los > 0 && "The largest object size was found to be 0. Please check if your " "cells are correctly initialized."); box_length_ = los; for (int i = 0; i < 3; i++) { int dimension_length = grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i]; int r = dimension_length % box_length_; // If the grid is not perfectly divisible along each dimension by the // resolution, extend the grid so that it is if (r != 0) { // std::abs for the case that box_length_ > dimension_length grid_dimensions_[2 * i + 1] += (box_length_ - r); } else { // Else extend the grid dimension with one row, because the outermost // object lies exactly on the border grid_dimensions_[2 * i + 1] += box_length_; } } // Pad the grid to avoid out of bounds checks when searching for neighbors for (int i = 0; i < 3; i++) { grid_dimensions_[2 * i] -= box_length_; grid_dimensions_[2 * i + 1] += box_length_; } // Calculate how many boxes fit along each dimension for (int i = 0; i < 3; i++) { int dimension_length = grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i]; assert((dimension_length % box_length_ == 0) && "The grid dimensions are not a multiple of its box length"); num_boxes_axis_[i] = dimension_length / box_length_; } num_boxes_xy_ = num_boxes_axis_[0] * num_boxes_axis_[1]; auto total_num_boxes = num_boxes_xy_ * num_boxes_axis_[2]; CheckGridGrowth(); // resize boxes_ if (boxes_.size() != total_num_boxes) { if (boxes_.capacity() < total_num_boxes) { boxes_.reserve(total_num_boxes * 2); } boxes_.resize(total_num_boxes); } successors_.reserve(); // Assign agents to boxes AssignToBoxesFunctor functor(this); rm->ForEachAgentParallel(1000, functor); auto* param = Simulation::GetActive()->GetParam(); if (param->bound_space) { int min = param->min_bound; int max = param->max_bound; threshold_dimensions_ = {min, max}; } if (param->thread_safety_mechanism == Param::ThreadSafetyMechanism::kAutomatic) { nb_mutex_builder_->Update(); } } else { // There are no agents in this simulation auto* param = Simulation::GetActive()->GetParam(); bool uninitialized = boxes_.size() == 0; if (uninitialized && param->bound_space) { // Simulation has never had any agents // Initialize grid dimensions with `Param::min_bound` and // `Param::max_bound` // This is required for the DiffusionGrid int min = param->min_bound; int max = param->max_bound; grid_dimensions_ = {min, max, min, max, min, max}; threshold_dimensions_ = {min, max}; has_grown_ = true; } else if (!uninitialized) { // all agents have been removed in the last iteration // grid state remains the same, but we have to set has_grown_ to false // otherwise the DiffusionGrid will attempt to resize has_grown_ = false; } else { Log::Fatal( "UniformGridEnvironment", "You tried to initialize an empty simulation without bound space. " "Please add agents, or set Param::bound_space, " "Param::min_bound, and Param::max_bound."); } } } /// @brief Calculates the squared euclidean distance between two points /// in 3D /// /// @param[in] pos1 Position of the first point /// @param[in] pos2 Position of the second point /// /// @return The squared distance between the two points /// inline double SquaredEuclideanDistance(const Double3& pos1, const Double3& pos2) const { const double dx = pos2[0] - pos1[0]; const double dy = pos2[1] - pos1[1]; const double dz = pos2[2] - pos1[2]; return (dx * dx + dy * dy + dz * dz); } inline bool WithinSquaredEuclideanDistance(double squared_radius, const Double3& pos1, const Double3& pos2) const { const double dx = pos2[0] - pos1[0]; const double dx2 = dx * dx; if (dx2 > squared_radius) { return false; } const double dy = pos2[1] - pos1[1]; const double dy2_plus_dx2 = dy * dy + dx2; if (dy2_plus_dx2 > squared_radius) { return false; } const double dz = pos2[2] - pos1[2]; const double distance = dz * dz + dy2_plus_dx2; return distance < squared_radius; } void UpdateBoxZOrder() { // iterate boxes in Z-order / morton order // TODO(lukas) this is a very quick attempt to test an idea // improve performance of this brute force solution zorder_sorted_boxes_.resize(boxes_.size()); const uint32_t nx = num_boxes_axis_[0]; const uint32_t ny = num_boxes_axis_[1]; const uint32_t nz = num_boxes_axis_[2]; #pragma omp parallel for collapse(3) for (uint32_t x = 0; x < nx; x++) { for (uint32_t y = 0; y < ny; y++) { for (uint32_t z = 0; z < nz; z++) { auto box_idx = GetBoxIndex(std::array<uint32_t, 3>{x, y, z}); auto morton = libmorton::morton3D_64_encode(x, y, z); zorder_sorted_boxes_[box_idx] = std::pair<uint32_t, const Box*>{morton, &boxes_[box_idx]}; } } } #ifdef LINUX __gnu_parallel::sort( zorder_sorted_boxes_.begin(), zorder_sorted_boxes_.end(), [](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; }); #else std::sort( zorder_sorted_boxes_.begin(), zorder_sorted_boxes_.end(), [](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; }); #endif // LINUX } /// This method iterates over all elements. Iteration is performed in /// Z-order of boxes. There is no particular order for elements inside a box. void IterateZOrder(Functor<void, const AgentHandle&>& callback) override { UpdateBoxZOrder(); for (uint64_t i = 0; i < zorder_sorted_boxes_.size(); i++) { auto it = zorder_sorted_boxes_[i].second->begin(); while (!it.IsAtEnd()) { callback(*it); ++it; } } } /// @brief Applies the given lambda to each neighbor /// /// @param[in] lambda The operation as a lambda /// @param query The query object void ForEachNeighbor(const std::function<void(const Agent*)>& lambda, const Agent& query) const { auto idx = query.GetBoxIdx(); FixedSizeVector<const Box*, 27> neighbor_boxes; GetMooreBoxes(&neighbor_boxes, idx); auto* rm = Simulation::GetActive()->GetResourceManager(); NeighborIterator ni(neighbor_boxes, timestamp_); while (!ni.IsAtEnd()) { auto* agent = rm->GetAgent(*ni); if (agent != &query) { lambda(agent); } ++ni; } } /// @brief Applies the given lambda to each neighbor of the specified /// agent. /// /// In simulation code do not use this function directly. Use the same /// function from the execution context (e.g. `InPlaceExecutionContext`) /// /// @param[in] lambda The operation as a lambda /// @param query The query object /// void ForEachNeighbor(Functor<void, const Agent*, double>& lambda, const Agent& query) override { const auto& position = query.GetPosition(); auto idx = query.GetBoxIdx(); FixedSizeVector<const Box*, 27> neighbor_boxes; GetMooreBoxes(&neighbor_boxes, idx); auto* rm = Simulation::GetActive()->GetResourceManager(); NeighborIterator ni(neighbor_boxes, timestamp_); const unsigned batch_size = 64; uint64_t size = 0; Agent* agents[batch_size] __attribute__((aligned(64))); double x[batch_size] __attribute__((aligned(64))); double y[batch_size] __attribute__((aligned(64))); double z[batch_size] __attribute__((aligned(64))); double squared_distance[batch_size] __attribute__((aligned(64))); auto process_batch = [&]() { #pragma omp simd for (uint64_t i = 0; i < size; ++i) { const double dx = x[i] - position[0]; const double dy = y[i] - position[1]; const double dz = z[i] - position[2]; squared_distance[i] = dx * dx + dy * dy + dz * dz; } for (uint64_t i = 0; i < size; ++i) { lambda(agents[i], squared_distance[i]); } size = 0; }; while (!ni.IsAtEnd()) { auto ah = *ni; // increment iterator already here to hide memory latency ++ni; auto* agent = rm->GetAgent(ah); if (agent != &query) { agents[size] = agent; const auto& pos = agent->GetPosition(); x[size] = pos[0]; y[size] = pos[1]; z[size] = pos[2]; size++; if (size == batch_size) { process_batch(); } } } process_batch(); } /// @brief Applies the given lambda to each neighbor of the specified /// agent. /// /// In simulation code do not use this function directly. Use the same /// function from the execution context (e.g. `InPlaceExecutionContext`) /// /// @param[in] lambda The operation as a lambda /// @param query The query object /// @param[in] squared_radius The search radius squared /// void ForEachNeighborWithinRadius( const std::function<void(const Agent*)>& lambda, const Agent& query, double squared_radius) { const auto& position = query.GetPosition(); auto idx = query.GetBoxIdx(); FixedSizeVector<const Box*, 27> neighbor_boxes; GetMooreBoxes(&neighbor_boxes, idx); auto* rm = Simulation::GetActive()->GetResourceManager(); NeighborIterator ni(neighbor_boxes, timestamp_); while (!ni.IsAtEnd()) { // Do something with neighbor object auto* agent = rm->GetAgent(*ni); if (agent != &query) { const auto& neighbor_position = agent->GetPosition(); if (this->WithinSquaredEuclideanDistance(squared_radius, position, neighbor_position)) { lambda(agent); } } ++ni; } } /// @brief Return the box index in the one dimensional array of the box /// that contains the position /// /// @param[in] position The position of the object /// /// @return The box index. /// size_t GetBoxIndex(const Double3& position) const { std::array<uint32_t, 3> box_coord; box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_; box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_; box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_; return GetBoxIndex(box_coord); } /// Gets the size of the largest object in the grid double GetLargestObjectSize() const override { return largest_object_size_; } const std::array<int32_t, 6>& GetDimensions() const override { return grid_dimensions_; } const std::array<int32_t, 2>& GetDimensionThresholds() const override { return threshold_dimensions_; } uint64_t GetNumBoxes() const { return boxes_.size(); } uint32_t GetBoxLength() { return box_length_; } std::array<uint32_t, 3> GetBoxCoordinates(size_t box_idx) const { std::array<uint32_t, 3> box_coord; box_coord[2] = box_idx / num_boxes_xy_; auto remainder = box_idx % num_boxes_xy_; box_coord[1] = remainder / num_boxes_axis_[0]; box_coord[0] = remainder % num_boxes_axis_[0]; return box_coord; } /// @brief Gets the information about the grid /// /// @param box_length The grid's box length /// @param num_boxes_axis The number of boxes along each axis of the grid /// @param grid_dimensions The grid's dimensions /// /// @tparam TUint32 A uint32 type (could also be cl_uint) /// @tparam TInt32 An int32 type (could be cl_int) /// void GetGridInfo(uint32_t* box_length, uint32_t* num_boxes_axis, int32_t* grid_dimensions) { *box_length = box_length_; num_boxes_axis[0] = num_boxes_axis_[0]; num_boxes_axis[1] = num_boxes_axis_[1]; num_boxes_axis[2] = num_boxes_axis_[2]; grid_dimensions[0] = grid_dimensions_[0]; grid_dimensions[1] = grid_dimensions_[2]; grid_dimensions[2] = grid_dimensions_[4]; } // NeighborMutex --------------------------------------------------------- /// This class ensures thread-safety for the InPlaceExecutionContext for the /// case /// that an agent modifies its neighbors. class GridNeighborMutexBuilder : public Environment::NeighborMutexBuilder { public: /// The NeighborMutex class is a synchronization primitive that can be /// used to protect agents' data from being simultaneously accessed by /// multiple threads. class GridNeighborMutex : public Environment::NeighborMutexBuilder::NeighborMutex { public: GridNeighborMutex(const FixedSizeVector<uint64_t, 27>& mutex_indices, GridNeighborMutexBuilder* mutex_builder) : mutex_indices_(mutex_indices), mutex_builder_(mutex_builder) { // Deadlocks occur if multiple threads try to acquire the same locks, // but in different order. // -> sort to avoid deadlocks - see lock ordering std::sort(mutex_indices_.begin(), mutex_indices_.end()); } virtual ~GridNeighborMutex() {} void lock() override { // NOLINT for (auto idx : mutex_indices_) { auto& mutex = mutex_builder_->mutexes_[idx].mutex_; // acquire lock (and spin if another thread is holding it) while (mutex.test_and_set(std::memory_order_acquire)) { } } } void unlock() override { // NOLINT for (auto idx : mutex_indices_) { auto& mutex = mutex_builder_->mutexes_[idx].mutex_; mutex.clear(std::memory_order_release); } } void SetMutexIndices(const FixedSizeVector<uint64_t, 27>& indices) { mutex_indices_ = indices; std::sort(mutex_indices_.begin(), mutex_indices_.end()); } private: FixedSizeVector<uint64_t, 27> mutex_indices_; GridNeighborMutexBuilder* mutex_builder_; }; /// Used to store mutexes in a vector. /// Always creates a new mutex (even for the copy constructor) struct MutexWrapper { MutexWrapper() {} MutexWrapper(const MutexWrapper&) {} std::atomic_flag mutex_ = ATOMIC_FLAG_INIT; }; virtual ~GridNeighborMutexBuilder() {} void Update() { auto* grid = static_cast<UniformGridEnvironment*>( Simulation::GetActive()->GetEnvironment()); mutexes_.resize(grid->GetNumBoxes()); } NeighborMutex* GetMutex(uint64_t box_idx) override { auto* grid = static_cast<UniformGridEnvironment*>( Simulation::GetActive()->GetEnvironment()); FixedSizeVector<uint64_t, 27> box_indices; grid->GetMooreBoxIndices(&box_indices, box_idx); thread_local GridNeighborMutex* mutex = new GridNeighborMutex(box_indices, this); mutex->SetMutexIndices(box_indices); return mutex; } private: /// one mutex for each box in `UniformGridEnvironment::boxes_` std::vector<MutexWrapper> mutexes_; }; /// Returns the `NeighborMutexBuilder`. The client uses it to create a /// `NeighborMutex`. NeighborMutexBuilder* GetNeighborMutexBuilder() override { return nb_mutex_builder_.get(); } private: /// The vector containing all the boxes in the grid /// Using parallel resize vector to enable parallel initialization and thus /// better scalability. ParallelResizeVector<Box> boxes_; /// is incremented at each call to Update /// This is used to decide if boxes should be reinitialized uint32_t timestamp_ = 0; /// Length of a Box uint32_t box_length_ = 1; /// Stores the number of boxes for each axis std::array<uint32_t, 3> num_boxes_axis_ = {{0}}; /// Number of boxes in the xy plane (=num_boxes_axis_[0] * num_boxes_axis_[1]) size_t num_boxes_xy_ = 0; /// Implements linked list - array index = key, value: next element /// /// // Usage /// AgentHandle current_element = ...; /// AgentHandle next_element = successors_[current_element]; AgentVector<AgentHandle> successors_; /// Determines which boxes to search neighbors in (see enum Adjacency) Adjacency adjacency_; /// The size of the largest object in the simulation double largest_object_size_ = 0; /// Cube which contains all agents /// {x_min, x_max, y_min, y_max, z_min, z_max} std::array<int32_t, 6> grid_dimensions_; /// Stores the min / max dimension value that needs to be surpassed in order /// to trigger a diffusion grid change std::array<int32_t, 2> threshold_dimensions_; /// stores pairs of <box morton code, box pointer> sorted by morton code. ParallelResizeVector<std::pair<uint32_t, const Box*>> zorder_sorted_boxes_; /// Holds instance of NeighborMutexBuilder. /// NeighborMutexBuilder is updated if `Param::thread_safety_mechanism` /// is set to `kAutomatic` std::unique_ptr<GridNeighborMutexBuilder> nb_mutex_builder_ = std::make_unique<GridNeighborMutexBuilder>(); void CheckGridGrowth() { // Determine if the grid dimensions have changed (changed in the sense that // the grid has grown outwards) auto min_gd = *std::min_element(grid_dimensions_.begin(), grid_dimensions_.end()); auto max_gd = *std::max_element(grid_dimensions_.begin(), grid_dimensions_.end()); if (min_gd < threshold_dimensions_[0]) { threshold_dimensions_[0] = min_gd; has_grown_ = true; } if (max_gd > threshold_dimensions_[1]) { Log::Info("UniformGridEnvironment", "Your agents are getting near the edge of " "the simulation space. Be aware of boundary conditions that " "may come into play!"); threshold_dimensions_[1] = max_gd; has_grown_ = true; } } void RoundOffGridDimensions(const std::array<double, 6>& grid_dimensions) { grid_dimensions_[0] = floor(grid_dimensions[0]); grid_dimensions_[2] = floor(grid_dimensions[2]); grid_dimensions_[4] = floor(grid_dimensions[4]); grid_dimensions_[1] = ceil(grid_dimensions[1]); grid_dimensions_[3] = ceil(grid_dimensions[3]); grid_dimensions_[5] = ceil(grid_dimensions[5]); } /// @brief Gets the Moore (i.e. adjacent) boxes of the query box. Also adds /// the query box. /// /// @param[out] neighbor_boxes The neighbor boxes /// @param[in] box_idx The query box /// void GetMooreBoxes(FixedSizeVector<const Box*, 27>* neighbor_boxes, size_t box_idx) const { neighbor_boxes->push_back(GetBoxPointer(box_idx)); // Adjacent 6 (top, down, left, right, front and back) if (adjacency_ >= kLow) { neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_)); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_)); neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx - 1)); neighbor_boxes->push_back(GetBoxPointer(box_idx + 1)); } // Adjacent 12 if (adjacency_ >= kMedium) { neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0])); neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_axis_[0] + 1)); } // Adjacent 8 if (adjacency_ >= kHigh) { neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1)); neighbor_boxes->push_back( GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1)); } } /// @brief Gets the box indices of all adjacent boxes. Also adds the /// query box index.
/// /// @param[out] box_indices Result containing all box indices /// @param[in] box_idx The query box /// void GetMooreBoxIndices(FixedSizeVector<uint64_t, 27>* box_indices, size_t box_idx) const { box_indices->push_back(box_idx); // Adjacent 6 (top, down, left, right, front and back) if (adjacency_ >= kLow) { box_indices->push_back(box_idx - num_boxes_xy_); box_indices->push_back(box_idx + num_boxes_xy_); box_indices->push_back(box_idx - num_boxes_axis_[0]); box_indices->push_back(box_idx + num_boxes_axis_[0]); box_indices->push_back(box_idx - 1); box_indices->push_back(box_idx + 1); } // Adjacent 12 if (adjacency_ >= kMedium) { box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0]); box_indices->push_back(box_idx - num_boxes_xy_ - 1); box_indices->push_back(box_idx - num_boxes_axis_[0] - 1); box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]); box_indices->push_back(box_idx + num_boxes_xy_ - 1); box_indices->push_back(box_idx + num_boxes_axis_[0] - 1); box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0]); box_indices->push_back(box_idx - num_boxes_xy_ + 1); box_indices->push_back(box_idx - num_boxes_axis_[0] + 1); box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]); box_indices->push_back(box_idx + num_boxes_xy_ + 1); box_indices->push_back(box_idx + num_boxes_axis_[0] + 1); } // Adjacent 8 if (adjacency_ >= kHigh) { box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1); box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1); box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1); box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1); box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1); box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1); box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1); box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1); } } /// Determines current box based on parameter box_idx and adds it together /// with half of the surrounding boxes to the vector. /// Legend: C = center, N = north, E = east, S = south, W = west, F = front, /// B = back /// For each box pair which is centro-symmetric only one box is taken -- /// e.g. 
E-W: E, or BNW-FSE: BNW /// /// (x-axis to the right \ y-axis up) /// z=1 /// +-----+----+-----+ /// | BNW | BN | BNE | /// +-----+----+-----+ /// | NW | N | NE | /// +-----+----+-----+ /// | FNW | FN | FNE | /// +-----+----+-----+ /// /// z = 0 /// +-----+----+-----+ /// | BW | B | BE | /// +-----+----+-----+ /// | W | C | E | /// +-----+----+-----+ /// | FW | F | FE | /// +-----+----+-----+ /// /// z = -1 /// +-----+----+-----+ /// | BSW | BS | BSE | /// +-----+----+-----+ /// | SW | S | SE | /// +-----+----+-----+ /// | FSW | FS | FSE | /// +-----+----+-----+ /// void GetHalfMooreBoxIndices(FixedSizeVector<size_t, 14>* neighbor_boxes, size_t box_idx) const { // C neighbor_boxes->push_back(box_idx); // BW neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] - 1); // FNW neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1); // NW neighbor_boxes->push_back(box_idx + num_boxes_xy_ - 1); // BNW neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1); // B neighbor_boxes->push_back(box_idx + num_boxes_axis_[0]); // FN neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]); // N neighbor_boxes->push_back(box_idx + num_boxes_xy_); // BN neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]); // E neighbor_boxes->push_back(box_idx + 1); // BE neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] + 1); // FNE neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1); // NE neighbor_boxes->push_back(box_idx + num_boxes_xy_ + 1); // BNE neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1); } /// @brief Gets the pointer to the box with the given index /// /// @param[in] index The index of the box /// /// @return The pointer to the box /// const Box* GetBoxPointer(size_t index) const { return &(boxes_[index]); } /// @brief Gets the pointer to the box with the given index /// /// @param[in] index The index of the box /// /// @return The pointer to the box /// Box* GetBoxPointer(size_t index) { return &(boxes_[index]); } /// Returns the box index in the one dimensional array based on box /// coordinates in space /// /// @param box_coord box coordinates in space (x, y, z) /// /// @return The box index. /// size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const { return box_coord[2] * num_boxes_xy_ + box_coord[1] * num_boxes_axis_[0] + box_coord[0]; } }; } // namespace bdm #endif // CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
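// ---------------------------------------------------------------------------
// Illustrative sketch, not part of BioDynaMo: the Box / successors_ layout
// above stores one singly linked list per box inside a single flat array
// (successors_[agent] = next agent in the same box), which is why AddObject
// only has to update the list head and one successors_ slot. The toy below,
// with invented names (ToyGrid, kEmpty), reproduces that idea using plain
// uint32_t handles and a sentinel value instead of the timestamp/length
// trick, so the insert and iterate logic can be read in isolation (C++17).
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyGrid {
  static constexpr uint32_t kEmpty = UINT32_MAX;  // end-of-list sentinel
  std::vector<uint32_t> box_start;   // head of the per-box list
  std::vector<uint32_t> successors;  // successors[a] = next agent in a's box

  ToyGrid(std::size_t num_boxes, std::size_t num_agents)
      : box_start(num_boxes, kEmpty), successors(num_agents, kEmpty) {}

  // Mirrors Box::AddObject: push-front in O(1), no per-box heap storage.
  void Add(uint32_t box, uint32_t agent) {
    successors[agent] = box_start[box];
    box_start[box] = agent;
  }

  // Mirrors Box::Iterator: follow the chain until the sentinel is reached.
  template <typename F>
  void ForEach(uint32_t box, F&& f) const {
    for (uint32_t a = box_start[box]; a != kEmpty; a = successors[a]) f(a);
  }
};
// Note: agents are visited in reverse insertion order, matching the
// push-front behavior of AddObject above.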
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); }); // lhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, op::mshadow_op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); }); } // rhs grad if (req[1] != kNullOp) { MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, ROP>( s, attrs, ctx, inputs[1], 
inputs[2], req[1], outputs[1], false, false, false, false); }); // rhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, op::mshadow_op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); }); } } protected: /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the inputs to be dense and still produce a sparse output * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool lhs_dense_ok = true, bool rhs_dense_ok = true> static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ?
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched) { if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) { // rsp, rsp -> rsp // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } else if ((lhs_stype == kCSRStorage && rhs_dense_ok) || (rhs_stype == kCSRStorage && lhs_dense_ok)) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeFallback); } } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto out_stype = 
outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void 
BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); }); } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
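// ---------------------------------------------------------------------------
// Illustrative sketch, not MXNet code: Compute above ultimately launches an
// elementwise kernel in which the request type (captured at compile time by
// op_with_req<OP, Req> / KERNEL_ASSIGN) decides between overwriting and
// accumulating into the output. A minimal standalone version with invented
// names (ReqType, LaunchBinary), assuming float data and OpenMP for the
// parallel loop:
#include <cstddef>

enum class ReqType { kWriteTo, kAddTo };  // stand-ins for MXNet's OpReqType

struct PlusOp {  // plays the role of a mshadow_op, e.g. mshadow_op::plus
  static float Map(float a, float b) { return a + b; }
};

template <typename OP>
void LaunchBinary(float* out, const float* lhs, const float* rhs,
                  std::ptrdiff_t n, ReqType req) {
  if (req == ReqType::kWriteTo) {
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < n; ++i) out[i] = OP::Map(lhs[i], rhs[i]);
  } else {
    // kAddTo accumulates, which is how backward passes sum gradients
    // from multiple paths into one output buffer.
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < n; ++i) out[i] += OP::Map(lhs[i], rhs[i]);
  }
}
// Usage: LaunchBinary<PlusOp>(c, a, b, n, ReqType::kWriteTo) computes
// c[i] = a[i] + b[i]; with ReqType::kAddTo it computes c[i] += a[i] + b[i].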
GB_binop__eq_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__eq_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__eq_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__eq_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fc64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__eq_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__eq_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fc64) // C=scalar+B GB (_bind1st__eq_fc64) // C=scalar+B' GB (_bind1st_tran__eq_fc64) // C=A+scalar GB (_bind2nd__eq_fc64) // C=A'+scalar GB (_bind2nd_tran__eq_fc64) // C type: bool // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 0 // BinaryOp: cij = GB_FC64_eq (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = (creal (GBX (Ax, pA, A_iso)) != 0) || (cimag (GBX (Ax, pA, A_iso)) != 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = (creal (GBX (Bx, pB, B_iso)) != 0) || (cimag (GBX (Bx, pB, B_iso)) != 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_eq (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_FC64 || GxB_NO_EQ_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_eq (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_eq (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_eq (x, aij) ; \ } GrB_Info GB (_bind1st_tran__eq_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_eq (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__eq_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
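//------------------------------------------------------------------------------
// Companion sketch (illustrative, not part of GraphBLAS): the bind2nd kernel
// above applies z = (aij == y) to every stored entry, skipping holes in the
// bitmap Ab. The same pattern in a self-contained form is sketched below;
// apply_eq_bind2nd and its arguments are made-up names, and the equality test
// mirrors the intent of GB_FC64_eq (real and imaginary parts both equal).
// Compile with -fopenmp.
//------------------------------------------------------------------------------
#include <complex.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void apply_eq_bind2nd (bool *Cx, const double complex *Ax,
    const int8_t *Ab, int64_t anz, double complex y, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;      // entry not present: skip
        Cx [p] = (creal (Ax [p]) == creal (y)) && (cimag (Ax [p]) == cimag (y)) ;
    }
}

int main (void)
{
    double complex Ax [4] = { 1 + 2*I, 0, 1 + 2*I, 3 } ;
    int8_t Ab [4] = { 1, 0, 1, 1 } ;               // entry 1 is absent
    bool Cx [4] = { false, false, false, false } ;
    apply_eq_bind2nd (Cx, Ax, Ab, 4, 1 + 2*I, 2) ;
    printf ("%d%d%d%d\n", Cx [0], Cx [1], Cx [2], Cx [3]) ;   // prints 1010
    return (0) ;
}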
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (1024*3) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; double S[N]; double p[2]; INIT(); long cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int max_threads = 224; #undef FOR_CLAUSES #define FOR_CLAUSES #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: private clause on omp for. // #undef FOR_CLAUSES #define FOR_CLAUSES private(p,q) #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ } , { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1)))) } // // Test: firstprivate clause on omp for. // #undef FOR_CLAUSES #define FOR_CLAUSES firstprivate(p,q) #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ } , { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: lastprivate clause on omp for. 
// double q0[1], q1[1], q2[1], q3[1], q4[1], q5[1], q6[1], q7[1], q8[1], q9[1]; for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TEST({ S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for lastprivate(q0)") for (int i = 0; i < N; i++) { q0[0] = C[i] + D[i]; A[i] += q0[0]; } _Pragma("omp for schedule(auto) lastprivate(q1)") for (int i = 0; i < N; i++) { q1[0] = D[i] + E[i]; B[i] += q1[0]; } _Pragma("omp for schedule(dynamic) lastprivate(q2)") for (int i = 0; i < N; i++) { q2[0] = C[i] + D[i]; A[i] += q2[0]; } _Pragma("omp for schedule(guided) lastprivate(q3)") for (int i = 0; i < N; i++) { q3[0] = D[i] + E[i]; B[i] += q3[0]; } _Pragma("omp for schedule(runtime) lastprivate(q4)") for (int i = 0; i < N; i++) { q4[0] = C[i] + D[i]; A[i] += q4[0]; } _Pragma("omp for schedule(static) lastprivate(q5)") for (int i = 0; i < N; i++) { q5[0] = D[i] + E[i]; B[i] += q5[0]; } _Pragma("omp for schedule(static,1) lastprivate(q6)") for (int i = 0; i < N; i++) { q6[0] = C[i] + D[i]; A[i] += q6[0]; } _Pragma("omp for schedule(static,9) lastprivate(q7)") for (int i = 0; i < N; i++) { q7[0] = D[i] + E[i]; B[i] += q7[0]; } _Pragma("omp for schedule(static,13) lastprivate(q8)") for (int i = 0; i < N; i++) { q8[0] = C[i] + D[i]; A[i] += q8[0]; } _Pragma("omp for schedule(static,30000) lastprivate(q9)") for (int i = 0; i < N; i++) { q9[0] = D[i] + E[i]; B[i] += q9[0]; } } double tmp = q0[0] + q1[0] + q2[0] + q3[0] + q4[0] + \ q5[0] + q6[0] + q7[0] + q8[0] + q9[0]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], 5 * (N + (N/2*(N+1))) )); } // // Test: private clause on omp for. // #undef FOR_CLAUSES #define FOR_CLAUSES private(p) #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( p[0] = 2; p[1] = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p[0] = C[i] + D[i]; \ p[1] = D[i] + E[i]; \ A[i] += p[0]; \ B[i] += p[1]; \ } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1)))) } // // Test: firstprivate clause on omp for. // #undef FOR_CLAUSES #define FOR_CLAUSES firstprivate(p) #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( p[0] = -4; p[1] = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p[0]; \ B[i] += D[i] + E[i] + p[1]; \ if (i == N-1) { \ p[0] += 6; \ p[1] += 9; \ } \ } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: collapse clause on omp for. // #undef FOR_CLAUSES #define FOR_CLAUSES collapse(2) #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: ordered clause on omp for. 
// #undef FOR_CLAUSES #define FOR_CLAUSES ordered #include "defines.h" for (int t = 0; t <= max_threads; t += max_threads) { int threads[1]; threads[0] = t; PARALLEL( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: nowait clause on omp for. // FIXME: Not sure how to test for correctness. // for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TEST({ S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for nowait schedule(static,1)") for (int i = 0; i < N; i++) { A[i] = C[i] + D[i]; } _Pragma("omp for nowait schedule(static,1)") for (int i = 0; i < N; i++) { B[i] = A[i] + D[i] + E[i]; } _Pragma("omp barrier") if (omp_get_thread_num() == 0) { double tmp = 0; for (int i = 0; i < N; i++) { tmp += B[i]; } S[0] += tmp; } } }, VERIFY(0, 1, S[0], (N/2*(N+1)) )); } // // Test: Ensure coalesced scheduling on GPU. // if (!cpuExec) { TEST({ S[0] = 0; for (int i = 0; i < 99; i++) { A[i] = 0; } _Pragma("omp parallel num_threads(33)") { _Pragma("omp for") for (int i = 0; i < 99; i++) { A[i] += i - omp_get_thread_num(); } _Pragma("omp for schedule(auto)") for (int i = 0; i < 99; i++) { A[i] += i - omp_get_thread_num(); } _Pragma("omp for schedule(static,1)") for (int i = 0; i < 99; i++) { A[i] += i - omp_get_thread_num(); } } double tmp = 0; for (int i = 0; i < 99; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], 3 * (33*33 + 66*33) )); } else { DUMP_SUCCESS(1); } // // Test: Ensure that we have barriers after dynamic, guided, // and ordered schedules, even with a nowait clause since the // NVPTX runtime doesn't currently support concurrent execution // of these constructs. // FIXME: Not sure how to test for correctness at runtime. // if (!cpuExec) { TEST({ for (int i = 0; i < N; i++) { A[i] = 0; } _Pragma("omp parallel") { _Pragma("omp for nowait schedule(guided)") for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; } _Pragma("omp for nowait schedule(dynamic)") for (int i = 0; i < N; i++) { A[i] += D[i] + E[i]; } _Pragma("omp for nowait ordered") for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; } } }, VERIFY(0, N, A[i], 2*i+2) ); } else { DUMP_SUCCESS(1); } // // Test: Linear clause on target // if (!cpuExec) { int l = 0; ZERO(A); #pragma omp target map(tofrom:A) #pragma omp parallel for linear(l:2) for(int i = 0 ; i < 10 ; i++) A[i] = l; int fail = 0; for(int i = 0 ; i < 10 ; i++) if(A[i] != i*2) { printf("error at %d, val = %lf expected = %d\n", i, A[i], i*2); fail = 1; } if(fail) printf("Error\n"); else printf("Succeeded\n"); } else { DUMP_SUCCESS(1); } return 0; }
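// ---------------------------------------------------------------------------
// Companion sketch (illustrative, separate from the test file above): the
// lastprivate tests rely on the rule that, after the loop, the variable holds
// the value written by the *sequentially last* iteration, whichever thread
// ran it and whatever schedule was used. A minimal self-checking demo:
// ---------------------------------------------------------------------------
#include <stdio.h>

int main(void)
{
    int last = -1;
    #pragma omp parallel for lastprivate(last) num_threads(4) schedule(static,1)
    for (int i = 0; i < 100; i++)
        last = i * 2;           // each thread updates its private copy
    // Iteration i == 99 is sequentially last, so last == 198 here even though
    // schedule(static,1) handed that iteration to an arbitrary thread.
    printf("last = %d (expected 198)\n", last);
    return last == 198 ? 0 : 1;
}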
quicksort.h
// -*- C++ -*- // Copyright (C) 2007-2018 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/quicksort.h * @brief Implementation of a unbalanced parallel quicksort (in-place). * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUICKSORT_H #define _GLIBCXX_PARALLEL_QUICKSORT_H 1 #include <parallel/parallel.h> #include <parallel/partition.h> namespace __gnu_parallel { /** @brief Unbalanced quicksort divide step. * @param __begin Begin iterator of subsequence. * @param __end End iterator of subsequence. * @param __comp Comparator. * @param __pivot_rank Desired __rank of the pivot. * @param __num_samples Choose pivot from that many samples. * @param __num_threads Number of threads that are allowed to work on * this part. */ template<typename _RAIter, typename _Compare> typename std::iterator_traits<_RAIter>::difference_type __parallel_sort_qs_divide(_RAIter __begin, _RAIter __end, _Compare __comp, typename std::iterator_traits <_RAIter>::difference_type __pivot_rank, typename std::iterator_traits <_RAIter>::difference_type __num_samples, _ThreadIndex __num_threads) { typedef std::iterator_traits<_RAIter> _TraitsType; typedef typename _TraitsType::value_type _ValueType; typedef typename _TraitsType::difference_type _DifferenceType; _DifferenceType __n = __end - __begin; __num_samples = std::min(__num_samples, __n); // Allocate uninitialized, to avoid default constructor. _ValueType* __samples = static_cast<_ValueType*> (::operator new(__num_samples * sizeof(_ValueType))); for (_DifferenceType __s = 0; __s < __num_samples; ++__s) { const unsigned long long __index = static_cast<unsigned long long> (__s) * __n / __num_samples; ::new(&(__samples[__s])) _ValueType(__begin[__index]); } __gnu_sequential::sort(__samples, __samples + __num_samples, __comp); _ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n]; __gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool> __pred(__comp, __pivot); _DifferenceType __split = __parallel_partition(__begin, __end, __pred, __num_threads); for (_DifferenceType __s = 0; __s < __num_samples; ++__s) __samples[__s].~_ValueType(); ::operator delete(__samples); return __split; } /** @brief Unbalanced quicksort conquer step. * @param __begin Begin iterator of subsequence. * @param __end End iterator of subsequence. * @param __comp Comparator. * @param __num_threads Number of threads that are allowed to work on * this part. 
*/ template<typename _RAIter, typename _Compare> void __parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end, _Compare __comp, _ThreadIndex __num_threads) { typedef std::iterator_traits<_RAIter> _TraitsType; typedef typename _TraitsType::value_type _ValueType; typedef typename _TraitsType::difference_type _DifferenceType; if (__num_threads <= 1) { __gnu_sequential::sort(__begin, __end, __comp); return; } _DifferenceType __n = __end - __begin, __pivot_rank; if (__n <= 1) return; _ThreadIndex __num_threads_left; if ((__num_threads % 2) == 1) __num_threads_left = __num_threads / 2 + 1; else __num_threads_left = __num_threads / 2; __pivot_rank = __n * __num_threads_left / __num_threads; _DifferenceType __split = __parallel_sort_qs_divide (__begin, __end, __comp, __pivot_rank, _Settings::get().sort_qs_num_samples_preset, __num_threads); #pragma omp parallel sections num_threads(2) { #pragma omp section __parallel_sort_qs_conquer(__begin, __begin + __split, __comp, __num_threads_left); #pragma omp section __parallel_sort_qs_conquer(__begin + __split, __end, __comp, __num_threads - __num_threads_left); } } /** @brief Unbalanced quicksort main call. * @param __begin Begin iterator of input sequence. * @param __end End iterator input sequence, ignored. * @param __comp Comparator. * @param __num_threads Number of threads that are allowed to work on * this part. */ template<typename _RAIter, typename _Compare> void __parallel_sort_qs(_RAIter __begin, _RAIter __end, _Compare __comp, _ThreadIndex __num_threads) { _GLIBCXX_CALL(__n) typedef std::iterator_traits<_RAIter> _TraitsType; typedef typename _TraitsType::value_type _ValueType; typedef typename _TraitsType::difference_type _DifferenceType; _DifferenceType __n = __end - __begin; // At least one element per processor. if (__num_threads > __n) __num_threads = static_cast<_ThreadIndex>(__n); __parallel_sort_qs_conquer( __begin, __begin + __n, __comp, __num_threads); } } //namespace __gnu_parallel #endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
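// ---------------------------------------------------------------------------
// Companion sketch (illustrative, not part of libstdc++): the conquer step
// above forks its two recursive calls with 'parallel sections'. The same
// divide-and-conquer shape can be expressed with OpenMP tasks, which avoids
// opening a nested parallel region at every recursion level. Plain C; the
// pivot here is simply the middle element rather than the sampled pivot of
// __parallel_sort_qs_divide, and the 1000-element task cutoff is arbitrary.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>

static void qs_task(int *a, long lo, long hi)
{
    if (lo >= hi) return;
    int pivot = a[lo + (hi - lo) / 2];
    long i = lo, j = hi;
    while (i <= j) {                       /* Hoare-style partition */
        while (a[i] < pivot) i++;
        while (a[j] > pivot) j--;
        if (i <= j) { int t = a[i]; a[i] = a[j]; a[j] = t; i++; j--; }
    }
    #pragma omp task if (j - lo > 1000)    /* recurse in parallel */
    qs_task(a, lo, j);
    #pragma omp task if (hi - i > 1000)
    qs_task(a, i, hi);
    #pragma omp taskwait                   /* subranges done before returning */
}

int main(void)
{
    enum { N = 100000 };
    int *a = malloc(N * sizeof *a);
    if (a == NULL) return 1;
    for (long k = 0; k < N; k++) a[k] = rand();
    #pragma omp parallel
    #pragma omp single                     /* one thread seeds the task tree */
    qs_task(a, 0, N - 1);
    for (long k = 1; k < N; k++)
        if (a[k - 1] > a[k]) { printf("not sorted\n"); return 1; }
    printf("sorted\n");
    free(a);
    return 0;
}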
work.c
#define PY_SSIZE_T_CLEAN #include <Python.h> #include <time.h> #ifdef HAVE_CL_CL_H #include <CL/cl.h> #elif HAVE_OPENCL_OPENCL_H #include <OpenCL/opencl.h> #else #include <omp.h> #include "blake2.h" #endif #if defined(HAVE_CL_CL_H) || defined(HAVE_OPENCL_OPENCL_H) // this is the variable opencl_program in nano-node/nano/node/openclwork.cpp const char *opencl_program = "\n\ enum Blake2b_IV {\n\ iv0 = 0x6a09e667f3bcc908UL,\n\ iv1 = 0xbb67ae8584caa73bUL,\n\ iv2 = 0x3c6ef372fe94f82bUL,\n\ iv3 = 0xa54ff53a5f1d36f1UL,\n\ iv4 = 0x510e527fade682d1UL,\n\ iv5 = 0x9b05688c2b3e6c1fUL,\n\ iv6 = 0x1f83d9abfb41bd6bUL,\n\ iv7 = 0x5be0cd19137e2179UL,\n\ };\n\ \n\ enum IV_Derived {\n\ nano_xor_iv0 = 0x6a09e667f2bdc900UL, // iv1 ^ 0x1010000 ^ outlen\n\ nano_xor_iv4 = 0x510e527fade682f9UL, // iv4 ^ inbytes\n\ nano_xor_iv6 = 0xe07c265404be4294UL, // iv6 ^ ~0\n\ };\n\ \n\ #ifdef cl_amd_media_ops\n\ #pragma OPENCL EXTENSION cl_amd_media_ops : enable\n\ static inline ulong rotr64(ulong x, int shift)\n\ {\n\ uint2 x2 = as_uint2(x);\n\ if (shift < 32)\n\ return as_ulong(amd_bitalign(x2.s10, x2, shift));\n\ return as_ulong(amd_bitalign(x2, x2.s10, (shift - 32)));\n\ }\n\ #else\n\ static inline ulong rotr64(ulong x, int shift)\n\ {\n\ return rotate(x, 64UL - shift);\n\ }\n\ #endif\n\ \n\ #define G32(m0, m1, m2, m3, vva, vb1, vb2, vvc, vd1, vd2) \\\n\ do { \\\n\ vva += (ulong2)(vb1 + m0, vb2 + m2); \\\n\ vd1 = rotr64(vd1 ^ vva.s0, 32); \\\n\ vd2 = rotr64(vd2 ^ vva.s1, 32); \\\n\ vvc += (ulong2)(vd1, vd2); \\\n\ vb1 = rotr64(vb1 ^ vvc.s0, 24); \\\n\ vb2 = rotr64(vb2 ^ vvc.s1, 24); \\\n\ vva += (ulong2)(vb1 + m1, vb2 + m3); \\\n\ vd1 = rotr64(vd1 ^ vva.s0, 16); \\\n\ vd2 = rotr64(vd2 ^ vva.s1, 16); \\\n\ vvc += (ulong2)(vd1, vd2); \\\n\ vb1 = rotr64(vb1 ^ vvc.s0, 63); \\\n\ vb2 = rotr64(vb2 ^ vvc.s1, 63); \\\n\ } while (0)\n\ \n\ #define G2v(m0, m1, m2, m3, a, b, c, d) \\\n\ G32(m0, m1, m2, m3, vv[a / 2], vv[b / 2].s0, vv[b / 2].s1, vv[c / 2], \\\n\ vv[d / 2].s0, vv[d / 2].s1)\n\ \n\ #define G2v_split(m0, m1, m2, m3, a, vb1, vb2, c, vd1, vd2) \\\n\ G32(m0, m1, m2, m3, vv[a / 2], vb1, vb2, vv[c / 2], vd1, vd2)\n\ \n\ #define ROUND(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, \\\n\ m15) \\\n\ do { \\\n\ G2v(m0, m1, m2, m3, 0, 4, 8, 12); \\\n\ G2v(m4, m5, m6, m7, 2, 6, 10, 14); \\\n\ G2v_split(m8, m9, m10, m11, 0, vv[5 / 2].s1, vv[6 / 2].s0, 10, \\\n\ vv[15 / 2].s1, vv[12 / 2].s0); \\\n\ G2v_split(m12, m13, m14, m15, 2, vv[7 / 2].s1, vv[4 / 2].s0, 8, \\\n\ vv[13 / 2].s1, vv[14 / 2].s0); \\\n\ } while (0)\n\ \n\ static inline ulong blake2b(ulong const nonce, __constant ulong *h)\n\ {\n\ ulong2 vv[8] = {\n\ {nano_xor_iv0, iv1}, {iv2, iv3}, {iv4, iv5},\n\ {iv6, iv7}, {iv0, iv1}, {iv2, iv3},\n\ {nano_xor_iv4, iv5}, {nano_xor_iv6, iv7},\n\ };\n\ \n\ ROUND(nonce, h[0], h[1], h[2], h[3], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);\n\ ROUND(0, 0, h[3], 0, 0, 0, 0, 0, h[0], 0, nonce, h[1], 0, 0, 0, h[2]);\n\ ROUND(0, 0, 0, nonce, 0, h[1], 0, 0, 0, 0, h[2], 0, 0, h[0], 0, h[3]);\n\ ROUND(0, 0, h[2], h[0], 0, 0, 0, 0, h[1], 0, 0, 0, h[3], nonce, 0, 0);\n\ ROUND(0, nonce, 0, 0, h[1], h[3], 0, 0, 0, h[0], 0, 0, 0, 0, h[2], 0);\n\ ROUND(h[1], 0, 0, 0, nonce, 0, 0, h[2], h[3], 0, 0, 0, 0, 0, h[0], 0);\n\ ROUND(0, 0, h[0], 0, 0, 0, h[3], 0, nonce, 0, 0, h[2], 0, h[1], 0, 0);\n\ ROUND(0, 0, 0, 0, 0, h[0], h[2], 0, 0, nonce, 0, h[3], 0, 0, h[1], 0);\n\ ROUND(0, 0, 0, 0, 0, h[2], nonce, 0, 0, h[1], 0, 0, h[0], h[3], 0, 0);\n\ ROUND(0, h[1], 0, h[3], 0, 0, h[0], 0, 0, 0, 0, 0, h[2], 0, 0, nonce);\n\ ROUND(nonce, h[0], h[1], h[2], 
h[3], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);\n\ ROUND(0, 0, h[3], 0, 0, 0, 0, 0, h[0], 0, nonce, h[1], 0, 0, 0, h[2]);\n\ \n\ return nano_xor_iv0 ^ vv[0].s0 ^ vv[4].s0;\n\ }\n\ #undef G32\n\ #undef G2v\n\ #undef G2v_split\n\ #undef ROUND\n\ \n\ __kernel void nano_work(__constant ulong *attempt,\n\ __global ulong *result_a,\n\ __constant ulong *item_a,\n\ __constant ulong *difficulty)\n\ {\n\ const ulong attempt_l = *attempt + get_global_id(0);\n\ if (blake2b(attempt_l, item_a) >= *difficulty)\n\ *result_a = attempt_l;\n\ }\n\ "; #endif static uint64_t s[16]; static int p; uint64_t xorshift1024star(void) { // nano-node/nano/node/xorshift.hpp const uint64_t s0 = s[p++]; uint64_t s1 = s[p &= 15]; s1 ^= s1 << 31; // a s1 ^= s1 >> 11; // b s1 ^= s0 ^ (s0 >> 30); // c s[p] = s1; return s1 * (uint64_t)1181783497276652981; } static PyObject *generate(PyObject *self, PyObject *args) { #ifdef USE_VISUAL_C int i, j; #else size_t i, j; #endif uint8_t *h32; uint64_t difficulty = 0, work = 0, nonce = 0; const size_t work_size = 1024 * 1024; // default value from nano Py_ssize_t p0; if (!PyArg_ParseTuple(args, "y#K", &h32, &p0, &difficulty)) return NULL; srand(time(NULL)); for (i = 0; i < 16; i++) for (j = 0; j < 4; j++) ((uint16_t *)&s[i])[j] = rand(); #if defined(HAVE_CL_CL_H) || defined(HAVE_OPENCL_OPENCL_H) int err; cl_uint num; cl_platform_id cpPlatform; err = clGetPlatformIDs(1, &cpPlatform, &num); if (err != CL_SUCCESS) { printf("clGetPlatformIDs failed with error code %d\n", err); goto FAIL; } else if (num == 0) { printf("clGetPlatformIDs failed to find a gpu device\n"); goto FAIL; } else { size_t length = strlen(opencl_program); cl_mem d_nonce, d_work, d_h32, d_difficulty; cl_device_id device_id; cl_context context; cl_command_queue queue; cl_program program; cl_kernel kernel; err = clGetDeviceIDs(cpPlatform, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL); if (err != CL_SUCCESS) { printf("clGetDeviceIDs failed with error code %d\n", err); goto FAIL; } context = clCreateContext(0, 1, &device_id, NULL, NULL, &err); if (err != CL_SUCCESS) { printf("clCreateContext failed with error code %d\n", err); goto FAIL; } #ifndef __APPLE__ queue = clCreateCommandQueueWithProperties(context, device_id, 0, &err); if (err != CL_SUCCESS) { printf("clCreateCommandQueueWithProperties failed with error code %d\n", err); goto FAIL; } #else queue = clCreateCommandQueue(context, device_id, 0, &err); if (err != CL_SUCCESS) { printf("clCreateCommandQueue failed with error code %d\n", err); goto FAIL; } #endif program = clCreateProgramWithSource( context, 1, (const char **)&opencl_program, &length, &err); if (err != CL_SUCCESS) { printf("clCreateProgramWithSource failed with error code %d\n", err); goto FAIL; } err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL); if (err != CL_SUCCESS) { printf("clBuildProgram failed with error code %d\n", err); goto FAIL; } d_nonce = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, 8, &nonce, &err); if (err != CL_SUCCESS) { printf("clCreateBuffer failed with error code %d\n", err); goto FAIL; } d_work = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, 8, &work, &err); if (err != CL_SUCCESS) { printf("clCreateBuffer failed with error code %d\n", err); goto FAIL; } d_h32 = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, 32, h32, &err); if (err != CL_SUCCESS) { printf("clCreateBuffer failed with error code %d\n", err); goto FAIL; } d_difficulty = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, 8, &difficulty, &err); if (err != 
CL_SUCCESS) { printf("clCreateBuffer failed with error code %d\n", err); goto FAIL; } kernel = clCreateKernel(program, "nano_work", &err); if (err != CL_SUCCESS) { printf("clCreateKernel failed with error code %d\n", err); goto FAIL; } err = clSetKernelArg(kernel, 0, sizeof(d_nonce), &d_nonce); if (err != CL_SUCCESS) { printf("clSetKernelArg failed with error code %d\n", err); goto FAIL; } err = clSetKernelArg(kernel, 1, sizeof(d_work), &d_work); if (err != CL_SUCCESS) { printf("clSetKernelArg failed with error code %d\n", err); goto FAIL; } err = clSetKernelArg(kernel, 2, sizeof(d_h32), &d_h32); if (err != CL_SUCCESS) { printf("clSetKernelArg failed with error code %d\n", err); goto FAIL; } err = clSetKernelArg(kernel, 3, sizeof(d_difficulty), &d_difficulty); if (err != CL_SUCCESS) { printf("clSetKernelArg failed with error code %d\n", err); goto FAIL; } err = clEnqueueWriteBuffer(queue, d_h32, CL_FALSE, 0, 32, h32, 0, NULL, NULL); if (err != CL_SUCCESS) { printf("clEnqueueWriteBuffer failed with error code %d\n", err); goto FAIL; } err = clEnqueueWriteBuffer(queue, d_difficulty, CL_FALSE, 0, 8, &difficulty, 0, NULL, NULL); if (err != CL_SUCCESS) { printf("clEnqueueWriteBuffer failed with error code %d\n", err); goto FAIL; } while (work == 0) { nonce = xorshift1024star(); err = clEnqueueWriteBuffer(queue, d_nonce, CL_FALSE, 0, 8, &nonce, 0, NULL, NULL); if (err != CL_SUCCESS) { printf("clEnqueueWriteBuffer failed with error code %d\n", err); goto FAIL; } err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &work_size, NULL, 0, NULL, NULL); if (err != CL_SUCCESS) { printf("clEnqueueNDRangeKernel failed with error code %d\n", err); goto FAIL; } err = clEnqueueReadBuffer(queue, d_work, CL_FALSE, 0, 8, &work, 0, NULL, NULL); if (err != CL_SUCCESS) { printf("clEnqueueReadBuffer failed with error code %d\n", err); goto FAIL; } err = clFinish(queue); if (err != CL_SUCCESS) { printf("clFinish failed with error code %d\n", err); goto FAIL; } } err = clReleaseMemObject(d_nonce); if (err != CL_SUCCESS) { printf("clReleaseMemObject failed with error code %d\n", err); goto FAIL; } err = clReleaseMemObject(d_work); if (err != CL_SUCCESS) { printf("clReleaseMemObject failed with error code %d\n", err); goto FAIL; } err = clReleaseMemObject(d_h32); if (err != CL_SUCCESS) { printf("clReleaseMemObject failed with error code %d\n", err); goto FAIL; } err = clReleaseMemObject(d_difficulty); if (err != CL_SUCCESS) { printf("clReleaseMemObject failed with error code %d\n", err); goto FAIL; } err = clReleaseKernel(kernel); if (err != CL_SUCCESS) { printf("clReleaseKernel failed with error code %d\n", err); goto FAIL; } err = clReleaseProgram(program); if (err != CL_SUCCESS) { printf("clReleaseProgram failed with error code %d\n", err); goto FAIL; } err = clReleaseCommandQueue(queue); if (err != CL_SUCCESS) { printf("clReleaseCommandQueue failed with error code %d\n", err); goto FAIL; } err = clReleaseContext(context); if (err != CL_SUCCESS) { printf("clReleaseContext failed with error code %d\n", err); goto FAIL; } } FAIL: #else while (work == 0) { nonce = xorshift1024star(); #pragma omp parallel #pragma omp for for (i = 0; i < work_size; i++) { #ifdef USE_VISUAL_C if (work == 0) { #endif uint64_t nonce_l = nonce + i, b2b_h = 0; blake2b_state b2b; blake2b_init(&b2b, 8); blake2b_update(&b2b, &nonce_l, 8); blake2b_update(&b2b, h32, 32); blake2b_final(&b2b, &b2b_h, 8); #ifdef USE_VISUAL_C if (b2b_h >= difficulty) { #pragma omp critical work = nonce_l; } } #else if (b2b_h >= difficulty) { #pragma omp atomic write 
work = nonce_l; #pragma omp cancel for } #pragma omp cancellation point for #endif } } #endif return Py_BuildValue("K", work); } static PyMethodDef m_methods[] = {{"generate", generate, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL}}; static struct PyModuleDef work_module = {PyModuleDef_HEAD_INIT, "work", NULL, -1, m_methods}; PyMODINIT_FUNC PyInit_work(void) { PyObject *m = PyModule_Create(&work_module); if (m == NULL) return NULL; return m; }
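/* ---------------------------------------------------------------------------
 * Companion sketch (illustrative, independent of the Python module above):
 * the OpenMP fallback ends each hit with an atomic publish followed by
 * 'cancel for'. That pattern in isolation looks like this; is_hit() is a
 * stand-in for the blake2b difficulty test. Cancellation only takes effect
 * when the program runs with OMP_CANCELLATION=true; without it the loop just
 * runs to completion with the same answer. Compile with -fopenmp.
 * ------------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>

static int is_hit(uint64_t candidate) { return candidate == 123456; }

int main(void)
{
    uint64_t found = 0;
    #pragma omp parallel for
    for (uint64_t i = 0; i < 1000000; i++) {
        if (is_hit(i)) {
            #pragma omp atomic write
            found = i;                      /* publish without a data race */
            #pragma omp cancel for          /* ask other threads to stop */
        }
        #pragma omp cancellation point for  /* where threads observe it */
    }
    printf("found = %llu\n", (unsigned long long) found);
    return 0;
}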
Simulation.c
#include "XSbench_header.h" void run_event_based_simulation(Inputs in, GridPoint * energy_grid, NuclideGridPoint ** nuclide_grids, int * num_nucs, int ** mats, double ** concs, int mype, unsigned long long * vhash_result) { if( mype == 0) printf("Beginning event based simulation...\n"); unsigned long long vhash = 0; // OpenMP compiler directives - declaring variables as shared or private // The reduction is only needed when in verification mode. #pragma omp parallel default(none) \ shared( in, energy_grid, nuclide_grids, \ mats, concs, num_nucs, mype) \ reduction(+:vhash) { // Initialize parallel PAPI counters #ifdef PAPI int eventset = PAPI_NULL; int num_papi_events; #pragma omp critical { counter_init(&eventset, &num_papi_events); } #endif double * xs = (double *) calloc(5, sizeof(double)); // Initialize RNG seeds for threads int thread = omp_get_thread_num(); // XS Lookup Loop // This loop is independent. Represents lookup events for many particles executed independently in one loop. // i.e., All iterations can be processed in any order and are not related #pragma omp for schedule(guided) for( int i = 0; i < in.lookups; i++ ) { // Status text if( INFO && mype == 0 && thread == 0 && i % 2000 == 0 ) printf("\rCalculating XS's... (%.0lf%% completed)", (i / ( (double) in.lookups / (double) in.nthreads )) / (double) in.nthreads * 100.0); // Particles are seeded by their particle ID unsigned long seed = ((unsigned long) i+ (unsigned long)1)* (unsigned long) 13371337; // Randomly pick an energy and material for the particle double p_energy = rn(&seed); int mat = pick_mat(&seed); // debugging //printf("E = %lf mat = %d\n", p_energy, mat); double macro_xs_vector[5] = {0}; // This returns the macro_xs_vector, but we're not going // to do anything with it in this program, so return value // is written over. calculate_macro_xs( p_energy, mat, in.n_isotopes, in.n_gridpoints, num_nucs, concs, energy_grid, nuclide_grids, mats, macro_xs_vector, in.grid_type, in.hash_bins ); // Copy results from above function call onto heap // so that compiler cannot optimize function out // (only occurs if -flto flag is used) // This operation is only done to avoid optimizing out // calculate_macro_xs -- we do not care about what is // in the "xs" array memcpy(xs, macro_xs_vector, 5*sizeof(double)); // Verification hash calculation // This method provides a consistent hash across // architectures and compilers. #ifdef VERIFICATION char line[256]; sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf", p_energy, mat, macro_xs_vector[0], macro_xs_vector[1], macro_xs_vector[2], macro_xs_vector[3], macro_xs_vector[4]); unsigned long long vhash_local = hash(line, 10000); vhash += vhash_local; #endif } // Prints out thread local PAPI counters #ifdef PAPI if( mype == 0 && thread == 0 ) { printf("\n"); border_print(); center_print("PAPI COUNTER RESULTS", 79); border_print(); printf("Count \tSymbol \tDescription\n"); } { #pragma omp barrier } counter_stop(&eventset, num_papi_events); #endif } *vhash_result = vhash; } void run_history_based_simulation(Inputs in, GridPoint * energy_grid, NuclideGridPoint ** nuclide_grids, int * num_nucs, int ** mats, double ** concs, int mype, unsigned long long * vhash_result) { if( mype == 0) printf("Beginning history based simulation...\n"); unsigned long long vhash = 0; // OpenMP compiler directives - declaring variables as shared or private // The reduction is only needed when in verification mode.
#pragma omp parallel default(none) \ shared( in, energy_grid, nuclide_grids, \ mats, concs, num_nucs, mype) \ reduction(+:vhash) { // Initialize parallel PAPI counters #ifdef PAPI int eventset = PAPI_NULL; int num_papi_events; #pragma omp critical { counter_init(&eventset, &num_papi_events); } #endif double * xs = (double *) calloc(5, sizeof(double)); // Initialize RNG seeds for threads int thread = omp_get_thread_num(); // Particle loop // (independent - can be processed in any order and in parallel) // Only present in History based method (default) #pragma omp for schedule(guided) for( int p = 0; p < in.particles; p++ ) { // Particles are seeded by their particle ID unsigned long seed = ((unsigned long) p+ (unsigned long)1)* (unsigned long) 13371337; // Randomly pick an energy and material for the particle double p_energy = rn(&seed); int mat = pick_mat(&seed); // Status text if( INFO && mype == 0 && thread == 0 && p % 100 == 0 ) printf("\rCalculating XS's... (%.0lf%% completed)", (p / ( (double)in.particles / (double) in.nthreads )) / (double) in.nthreads * 100.0); // XS Lookup Loop // This loop is dependent! // i.e., Next iteration uses data computed in previous iter. for( int i = 0; i < in.lookups; i++ ) { // debugging //printf("E = %lf mat = %d\n", p_energy, mat); double macro_xs_vector[5] = {0}; // This returns the macro_xs_vector, but we're not going // to do anything with it in this program, so return value // is written over. calculate_macro_xs( p_energy, mat, in.n_isotopes, in.n_gridpoints, num_nucs, concs, energy_grid, nuclide_grids, mats, macro_xs_vector, in.grid_type, in.hash_bins ); // Copy results from above function call onto heap // so that compiler cannot optimize function out // (only occurs if -flto flag is used) // This operation is only done to avoid optimizing out // calculate_macro_xs -- we do not care about what is // in the "xs" array memcpy(xs, macro_xs_vector, 5*sizeof(double)); // Verification hash calculation // This method provides a consistent hash across // architectures and compilers. #ifdef VERIFICATION char line[256]; sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf", p_energy, mat, macro_xs_vector[0], macro_xs_vector[1], macro_xs_vector[2], macro_xs_vector[3], macro_xs_vector[4]); unsigned long long vhash_local = hash(line, 10000); vhash += vhash_local; #endif // Randomly pick next energy and material for the particle // Also incorporates results from macro_xs lookup to // enforce loop dependency. // In a real MC app, this dependency is expressed in terms // of branching physics sampling, whereas here we are just // artificially enforcing this dependence based on altering // the seed for( int x = 0; x < 5; x++ ) seed += macro_xs_vector[x] * (x+1)*1337*1337; p_energy = rn(&seed); mat = pick_mat(&seed); } } // Prints out thread local PAPI counters #ifdef PAPI if( mype == 0 && thread == 0 ) { printf("\n"); border_print(); center_print("PAPI COUNTER RESULTS", 79); border_print(); printf("Count \tSymbol \tDescription\n"); } { #pragma omp barrier } counter_stop(&eventset, num_papi_events); #endif } *vhash_result = vhash; }
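// ---------------------------------------------------------------------------
// Companion sketch (illustrative, not part of XSBench): the verification
// scheme above works because each lookup derives its RNG seed from its own
// index, so the random stream is reproducible under any schedule, and the
// integer reduction makes the checksum order-insensitive. Stripped to its
// essentials (rn() here is a generic 64-bit LCG, not XSBench's rn()):
// ---------------------------------------------------------------------------
#include <stdint.h>
#include <stdio.h>

static double rn(uint64_t *seed)      /* illustrative LCG, uniform in [0,1) */
{
    *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
    return (double) (*seed >> 11) / 9007199254740992.0;    /* divide by 2^53 */
}

int main(void)
{
    const int lookups = 1000000;
    unsigned long long vhash = 0;
    #pragma omp parallel for schedule(guided) reduction(+:vhash)
    for (int i = 0; i < lookups; i++) {
        uint64_t seed = ((uint64_t) i + 1) * 13371337ULL;  /* seed by index */
        double p_energy = rn(&seed);
        vhash += (unsigned long long) (p_energy * 10000.0);
    }
    /* Same value for any thread count or schedule. */
    printf("vhash = %llu\n", vhash);
    return 0;
}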
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(16*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(16*t3+Nx+12,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),128*t4+126),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* 
End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
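// ---------------------------------------------------------------------------
// Companion sketch (illustrative): the CLooG-generated nest above is a
// time-skewed, tiled schedule of the following reference computation -- an
// order-1 7-point stencil with variable coefficients on a double-buffered
// grid. A plain version is handy for cross-checking the tiled output; it
// assumes the same A[t%2] buffering and array shapes as main() above.
// ---------------------------------------------------------------------------
void stencil_3d7pt_var_ref(int Nt, int Nz, int Ny, int Nx,
                           double ****A, double ****coef)
{
    for (int t = 0; t < Nt - 1; t++) {
        #pragma omp parallel for collapse(2)
        for (int i = 1; i < Nz - 1; i++)
            for (int j = 1; j < Ny - 1; j++)
                for (int k = 1; k < Nx - 1; k++)
                    A[(t + 1) % 2][i][j][k] =
                          coef[0][i][j][k] * A[t % 2][i][j][k]
                        + coef[1][i][j][k] * A[t % 2][i - 1][j][k]
                        + coef[2][i][j][k] * A[t % 2][i][j - 1][k]
                        + coef[3][i][j][k] * A[t % 2][i][j][k - 1]
                        + coef[4][i][j][k] * A[t % 2][i + 1][j][k]
                        + coef[5][i][j][k] * A[t % 2][i][j + 1][k]
                        + coef[6][i][j][k] * A[t % 2][i][j][k + 1];
    }
}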
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(12*t1+Ny+15,32)),floord(24*t2+Ny+11,32)),floord(24*t1-24*t2+Nz+Ny+13,32));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(32*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(32*t3+Nx+19,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),8*t3+6),256*t4+254);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 
1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
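// ---------------------------------------------------------------------------
// Companion sketch (illustrative): the tiled nest above computes a 25-point
// (radius-4) leapfrog wave-equation update. The untiled reference is below;
// it assumes the same double buffering, where A[(t+1)%2] holds u(t-1) on
// entry and receives u(t+1), and roc2 carries the per-cell (c*dt)^2 factor.
// Coefficients are the constants coef0..coef4 defined in main() above.
// ---------------------------------------------------------------------------
void stencil_3d25pt_ref(int Nt, int Nz, int Ny, int Nx,
                        double ****A, double ***roc2)
{
    const double c[5] = { -0.28472, 0.16000, -0.02000, 0.00254, -0.00018 };
    for (int t = 0; t < Nt; t++) {
        #pragma omp parallel for collapse(2)
        for (int i = 4; i < Nz - 4; i++)
            for (int j = 4; j < Ny - 4; j++)
                for (int k = 4; k < Nx - 4; k++) {
                    double lap = c[0] * A[t % 2][i][j][k];
                    for (int r = 1; r <= 4; r++)   /* six axis neighbors per ring */
                        lap += c[r] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]
                                     + A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]
                                     + A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
                    A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k]
                                            - A[(t + 1) % 2][i][j][k]
                                            + roc2[i][j][k] * lap;
                }
    }
}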
summary.c
#include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> /* getopt */ #include <string.h> /* memset */ #include "../gptl.h" #ifdef THREADED_OMP #include <omp.h> #endif static int iam = 0; static int nproc = 1; /* number of MPI tasks (default 1) */ static int nthreads = 1; /* number of threads (default 1) */ double sub (int); int main (int argc, char **argv) { char pname[MPI_MAX_PROCESSOR_NAME]; int iter; int counter; int c; int tnum = 0; int resultlen; int ret; double value; extern char *optarg; while ((c = getopt (argc, argv, "p:")) != -1) { switch (c) { case 'p': if ((ret = GPTLevent_name_to_code (optarg, &counter)) != 0) { printf ("Failure from GPTLevent_name_to_code\n"); return 1; } if (GPTLsetoption (counter, 1) < 0) { printf ("Failure from GPTLsetoption (%s,1)\n", optarg); return 1; } break; default: printf ("unknown option %c\n", c); printf ("Usage: %s [-p option_name]\n", argv[0]); return 2; } } ret = GPTLsetoption (GPTLabort_on_error, 1); ret = GPTLsetoption (GPTLoverhead, 1); ret = GPTLsetoption (GPTLnarrowprint, 1); if (MPI_Init (&argc, &argv) != MPI_SUCCESS) { printf ("Failure from MPI_Init\n"); return 1; } /* ** If ENABLE_PMPI is set, GPTL was initialized in MPI_Init */ #ifndef ENABLE_PMPI ret = GPTLinitialize (); ret = GPTLstart ("total"); #endif ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam); ret = MPI_Comm_size (MPI_COMM_WORLD, &nproc); ret = MPI_Get_processor_name (pname, &resultlen); printf ("Rank %d is running on processor %s\n", iam, pname); #ifdef THREADED_OMP nthreads = omp_get_max_threads (); #pragma omp parallel for private (iter, ret, tnum) #endif for (iter = 1; iter <= nthreads; iter++) { #ifdef THREADED_OMP tnum = omp_get_thread_num (); #endif printf ("Thread %d of rank %d on processor %s\n", tnum, iam, pname); value = sub (iter); } #ifndef ENABLE_PMPI ret = GPTLstop ("total"); ret = GPTLpr (iam); #endif if (iam == 0) { printf ("summary: testing GPTLpr_summary...\n"); printf ("Number of threads was %d\n", nthreads); printf ("Number of tasks was %d\n", nproc); } if (GPTLpr_summary (MPI_COMM_WORLD) != 0) return 1; if (GPTLpr_summary_file (MPI_COMM_WORLD, "timing.summary.duplicate") != 0) return 1; ret = MPI_Finalize (); if (GPTLfinalize () != 0) return 1; return 0; } double sub (int iter) { unsigned long usec; unsigned long looplen = iam*iter*100000; unsigned long i; double sum; int ret; ret = GPTLstart ("sub"); /* Sleep msec is mpi rank + thread number */ usec = 1000 * (iam * iter); ret = GPTLstart ("sleep"); usleep (usec); ret = GPTLstop ("sleep"); ret = GPTLstart ("work"); sum = 0.; ret = GPTLstart ("add"); for (i = 0; i < looplen; ++i) { sum += i; } ret = GPTLstop ("add"); ret = GPTLstart ("madd"); for (i = 0; i < looplen; ++i) { sum += i*1.1; } ret = GPTLstop ("madd"); ret = GPTLstart ("div"); for (i = 0; i < looplen; ++i) { sum /= 1.1; } ret = GPTLstop ("div"); ret = GPTLstop ("work"); ret = GPTLstop ("sub"); return sum; }
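/* ---------------------------------------------------------------------------
 * Companion sketch (illustrative): stripped of MPI and OpenMP, the GPTL call
 * sequence the test above depends on is just initialize / start / stop /
 * print / finalize. Only entry points already used in the file appear here;
 * the header path and the "total"/"work" region names are placeholders.
 * Link against libgptl.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include "gptl.h"

int main (void)
{
  int ret;
  double sum = 0.;
  if (GPTLinitialize () != 0)
    return 1;
  ret = GPTLstart ("total");
  ret = GPTLstart ("work");
  for (long i = 0; i < 10000000L; ++i)
    sum += i * 1.1;
  ret = GPTLstop ("work");
  ret = GPTLstop ("total");
  ret = GPTLpr (0);          /* per-process timing report */
  if (GPTLfinalize () != 0)
    return 1;
  printf ("sum = %g ret = %d\n", sum, ret);
  return 0;
}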
gimplify.c
/* Modula-3: modified */ /* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. Major work done by Sebastian Pop <s.pop@laposte.net>, Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "gimple.h" #include "tree-iterator.h" #include "tree-inline.h" #include "tree-pretty-print.h" #include "langhooks.h" #include "tree-flow.h" #include "cgraph.h" #include "timevar.h" #include "hashtab.h" #include "flags.h" #include "function.h" #include "output.h" #include "ggc.h" #include "diagnostic-core.h" #include "target.h" #include "pointer-set.h" #include "splay-tree.h" #include "vec.h" #include "gimple.h" #include "tree-pass.h" #include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name. */ #include "expr.h" /* FIXME: for can_move_by_pieces and STACK_CHECK_MAX_VAR_SIZE. */ static struct gimplify_ctx *gimplify_ctxp; /* Formal (expression) temporary table handling: multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Forward declaration. */ static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool); /* Mark X addressable. Unlike the langhook we expect X to be in gimple form and we don't do any syntax checking. */ void mark_addressable (tree x) { while (handled_component_p (x)) x = TREE_OPERAND (x, 0); if (TREE_CODE (x) == MEM_REF && TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR) x = TREE_OPERAND (TREE_OPERAND (x, 0), 0); if (TREE_CODE (x) != VAR_DECL && TREE_CODE (x) != PARM_DECL && TREE_CODE (x) != RESULT_DECL) return; TREE_ADDRESSABLE (x) = 1; } /* Return a hash value for a formal temporary table entry. */ static hashval_t gimple_tree_hash (const void *p) { tree t = ((const elt_t *) p)->val; return iterative_hash_expr (t, 0); } /* Compare two formal temporary table entries. */ static int gimple_tree_eq (const void *p1, const void *p2) { tree t1 = ((const elt_t *) p1)->val; tree t2 = ((const elt_t *) p2)->val; enum tree_code code = TREE_CODE (t1); if (TREE_CODE (t2) != code || TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; if (!operand_equal_p (t1, t2, 0)) return 0; #ifdef ENABLE_CHECKING /* Only allow them to compare equal if they also hash equal; otherwise results are nondeterminate, and we fail bootstrap comparison. */ gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2)); #endif return 1; } /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. 
During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ void gimple_seq_add_stmt_without_update (gimple_seq *seq_p, gimple gs) { gimple_stmt_iterator si; if (gs == NULL) return; if (*seq_p == NULL) *seq_p = gimple_seq_alloc (); si = gsi_last (*seq_p); gsi_insert_after_without_update (&si, gs, GSI_NEW_STMT); } /* Shorter alias name for the above function for use in gimplify.c only. */ static inline void gimplify_seq_add_stmt (gimple_seq *seq_p, gimple gs) { gimple_seq_add_stmt_without_update (seq_p, gs); } /* Append sequence SRC to the end of sequence *DST_P. If *DST_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_seq, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ static void gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src) { gimple_stmt_iterator si; if (src == NULL) return; if (*dst_p == NULL) *dst_p = gimple_seq_alloc (); si = gsi_last (*dst_p); gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT); } /* Set up a context for the gimplifier. */ void push_gimplify_context (struct gimplify_ctx *c) { memset (c, '\0', sizeof (*c)); c->prev_context = gimplify_ctxp; gimplify_ctxp = c; } /* Tear down a context for the gimplifier. If BODY is non-null, then put the temporaries into the outer BIND_EXPR. Otherwise, put them in the local_decls. BODY is not a sequence, but the first tuple in a sequence. */ void pop_gimplify_context (gimple body) { struct gimplify_ctx *c = gimplify_ctxp; gcc_assert (c && (c->bind_expr_stack == NULL || VEC_empty (gimple, c->bind_expr_stack))); VEC_free (gimple, heap, c->bind_expr_stack); gimplify_ctxp = c->prev_context; if (body) declare_vars (c->temps, body, false); else record_vars (c->temps); if (c->temp_htab) htab_delete (c->temp_htab); } /* Push a GIMPLE_BIND tuple onto the stack of bindings. */ static void gimple_push_bind_expr (gimple gimple_bind) { if (gimplify_ctxp->bind_expr_stack == NULL) gimplify_ctxp->bind_expr_stack = VEC_alloc (gimple, heap, 8); VEC_safe_push (gimple, heap, gimplify_ctxp->bind_expr_stack, gimple_bind); } /* Pop the first element off the stack of bindings. */ static void gimple_pop_bind_expr (void) { VEC_pop (gimple, gimplify_ctxp->bind_expr_stack); } /* Return the first element of the stack of bindings. */ gimple gimple_current_bind_expr (void) { return VEC_last (gimple, gimplify_ctxp->bind_expr_stack); } /* Return the stack of bindings created during gimplification. */ VEC(gimple, heap) * gimple_bind_expr_stack (void) { return gimplify_ctxp->bind_expr_stack; } /* Return true iff there is a COND_EXPR between us and the innermost CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */ static bool gimple_conditional_context (void) { return gimplify_ctxp->conditions > 0; } /* Note that we've entered a COND_EXPR. */ static void gimple_push_condition (void) { #ifdef ENABLE_GIMPLE_CHECKING if (gimplify_ctxp->conditions == 0) gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups)); #endif ++(gimplify_ctxp->conditions); } /* Note that we've left a COND_EXPR. If we're back at unconditional scope now, add any conditional cleanups we've seen to the prequeue. 
*/ static void gimple_pop_condition (gimple_seq *pre_p) { int conds = --(gimplify_ctxp->conditions); gcc_assert (conds >= 0); if (conds == 0) { gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups); gimplify_ctxp->conditional_cleanups = NULL; } } /* Both gimplify the statement T and append it to *SEQ_P. This function behaves exactly as gimplify_stmt, but you don't have to pass T as a reference. */ void gimplify_and_add (tree t, gimple_seq *seq_p) { gimplify_stmt (&t, seq_p); } /* Strip off a legitimate source ending from the input string NAME of length LEN. Rather than having to know the names used by all of our front ends, we strip off an ending of a period followed by up to five characters. (Java uses ".class".) */ static inline void remove_suffix (char *name, int len) { int i; for (i = 2; i < 8 && len > i; i++) { if (name[len - i] == '.') { name[len - i] = '\0'; break; } } } /* Create a new temporary name with PREFIX. Return an identifier. */ static GTY(()) unsigned int tmp_var_id_num; tree create_tmp_var_name (const char *prefix) { char *tmp_name; if (prefix) { char *preftmp = ASTRDUP (prefix); remove_suffix (preftmp, strlen (preftmp)); clean_symbol_name (preftmp); prefix = preftmp; } ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++); return get_identifier (tmp_name); } /* Create a new temporary variable declaration of type TYPE. Do NOT push it into the current binding. */ tree create_tmp_var_raw (tree type, const char *prefix) { tree tmp_var; tmp_var = build_decl (input_location, VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL, type); /* The variable was declared by the compiler. */ DECL_ARTIFICIAL (tmp_var) = 1; /* And we don't want debug info for it. */ DECL_IGNORED_P (tmp_var) = 1; /* Make the variable writable. */ TREE_READONLY (tmp_var) = 0; DECL_EXTERNAL (tmp_var) = 0; TREE_STATIC (tmp_var) = 0; TREE_USED (tmp_var) = 1; return tmp_var; } /* Create a new temporary variable declaration of type TYPE. DO push the variable into the current binding. Further, assume that this is called only from gimplification or optimization, at which point the creation of certain types are bugs. */ tree create_tmp_var (tree type, const char *prefix) { tree tmp_var; /* We don't allow types that are addressable (meaning we can't make copies), or incomplete. We also used to reject every variable size objects here, but now support those for which a constant upper bound can be obtained. The processing for variable sizes is performed in gimple_add_tmp_var, point at which it really matters and possibly reached via paths not going through this function, e.g. after direct calls to create_tmp_var_raw. */ gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type)); tmp_var = create_tmp_var_raw (type, prefix); gimple_add_tmp_var (tmp_var); return tmp_var; } /* Create a new temporary variable declaration of type TYPE by calling create_tmp_var and if TYPE is a vector or a complex number, mark the new temporary as gimple register. */ tree create_tmp_reg (tree type, const char *prefix) { tree tmp; tmp = create_tmp_var (type, prefix); if (TREE_CODE (type) == COMPLEX_TYPE || TREE_CODE (type) == VECTOR_TYPE) DECL_GIMPLE_REG_P (tmp) = 1; return tmp; } /* Create a temporary with a name derived from VAL. Subroutine of lookup_tmp_var; nobody else should call this function. */ static inline tree create_tmp_from_val (tree val) { /* Drop all qualifiers and address-space information from the value type. 
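For example, a temporary built for a value of type 'const volatile int' is declared as plain 'int': TYPE_MAIN_VARIANT strips the qualifiers, which is necessary because the temporary itself must be assignable.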
*/ return create_tmp_var (TYPE_MAIN_VARIANT (TREE_TYPE (val)), get_name (val)); } /* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse an existing expression temporary. */ static tree lookup_tmp_var (tree val, bool is_formal) { tree ret; /* If not optimizing, never really reuse a temporary. local-alloc won't allocate any variable that is used in more than one basic block, which means it will go into memory, causing much extra work in reload and final and poorer code generation, outweighing the extra memory allocation here. */ if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val)) ret = create_tmp_from_val (val); else { elt_t elt, *elt_p; void **slot; elt.val = val; if (gimplify_ctxp->temp_htab == NULL) gimplify_ctxp->temp_htab = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free); slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT); if (*slot == NULL) { elt_p = XNEW (elt_t); elt_p->val = val; elt_p->temp = ret = create_tmp_from_val (val); *slot = (void *) elt_p; } else { elt_p = (elt_t *) *slot; ret = elt_p->temp; } } return ret; } /* Return true if T is a CALL_EXPR or an expression that can be assigned to a temporary. Note that this predicate should only be used during gimplification. See the rationale for this in gimplify_modify_expr. */ static bool is_gimple_reg_rhs_or_call (tree t) { return (get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS || TREE_CODE (t) == CALL_EXPR); } /* Return true if T is a valid memory RHS or a CALL_EXPR. Note that this predicate should only be used during gimplification. See the rationale for this in gimplify_modify_expr. */ static bool is_gimple_mem_rhs_or_call (tree t) { /* If we're dealing with a renamable type, either source or dest must be a renamed variable. */ if (is_gimple_reg_type (TREE_TYPE (t))) return is_gimple_val (t); else return (is_gimple_val (t) || is_gimple_lvalue (t) || TREE_CODE (t) == CALL_EXPR); } /* Helper for get_formal_tmp_var and get_initialized_tmp_var. */ static tree internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p, bool is_formal) { tree t, mod; /* Notice that we explicitly allow VAL to be a CALL_EXPR so that we can create an INIT_EXPR and convert it into a GIMPLE_CALL below. */ gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call, fb_rvalue); t = lookup_tmp_var (val, is_formal); if (is_formal && (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)) DECL_GIMPLE_REG_P (t) = 1; mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val)); SET_EXPR_LOCATION (mod, EXPR_LOC_OR_HERE (val)); /* gimplify_modify_expr might want to reduce this further. */ gimplify_and_add (mod, pre_p); ggc_free (mod); /* If we're gimplifying into ssa, gimplify_modify_expr will have given our temporary an SSA name. Find and return it. */ if (gimplify_ctxp->into_ssa) { gimple last = gimple_seq_last_stmt (*pre_p); t = gimple_get_lhs (last); } return t; } /* Return a formal temporary variable initialized with VAL. PRE_P is as in gimplify_expr. Only use this function if: 1) The value of the unfactored expression represented by VAL will not change between the initialization and use of the temporary, and 2) The temporary will not be otherwise modified. For instance, #1 means that this is inappropriate for SAVE_EXPR temps, and #2 means it is inappropriate for && temps. For other cases, use get_initialized_tmp_var instead. 
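As an illustration (hypothetical name): when optimizing, two formal-temporary requests for the same expression 'a + b' hash to the same temp_htab slot, so both uses share a single temporary, D.1 = a + b, rather than evaluating the expression twice.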
*/ tree get_formal_tmp_var (tree val, gimple_seq *pre_p) { return internal_get_tmp_var (val, pre_p, NULL, true); } /* Return a temporary variable initialized with VAL. PRE_P and POST_P are as in gimplify_expr. */ tree get_initialized_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p) { return internal_get_tmp_var (val, pre_p, post_p, false); } /* Declare all the variables in VARS in SCOPE. If DEBUG_INFO is true, generate debug info for them; otherwise don't. */ void declare_vars (tree vars, gimple scope, bool debug_info) { tree last = vars; if (last) { tree temps, block; gcc_assert (gimple_code (scope) == GIMPLE_BIND); temps = nreverse (last); block = gimple_bind_block (scope); gcc_assert (!block || TREE_CODE (block) == BLOCK); if (!block || !debug_info) { DECL_CHAIN (last) = gimple_bind_vars (scope); gimple_bind_set_vars (scope, temps); } else { /* We need to attach the nodes both to the BIND_EXPR and to its associated BLOCK for debugging purposes. The key point here is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR is a subchain of the BIND_EXPR_VARS of the BIND_EXPR. */ if (BLOCK_VARS (block)) BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps); else { gimple_bind_set_vars (scope, chainon (gimple_bind_vars (scope), temps)); BLOCK_VARS (block) = temps; } } } } /* For VAR a VAR_DECL of variable size, try to find a constant upper bound for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly. Abort if no such upper bound can be obtained. */ static void force_constant_size (tree var) { /* The only attempt we make is by querying the maximum size of objects of the variable's type. */ HOST_WIDE_INT max_size; gcc_assert (TREE_CODE (var) == VAR_DECL); max_size = max_int_size_in_bytes (TREE_TYPE (var)); gcc_assert (max_size >= 0); DECL_SIZE_UNIT (var) = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size); DECL_SIZE (var) = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT); } /* Push the temporary variable TMP into the current binding. */ void gimple_add_tmp_var (tree tmp) { gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp)); /* Later processing assumes that the object size is constant, which might not be true at this point. Force the use of a constant upper bound in this case. */ if (!host_integerp (DECL_SIZE_UNIT (tmp), 1)) force_constant_size (tmp); DECL_CONTEXT (tmp) = current_function_decl; DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1; if (gimplify_ctxp) { DECL_CHAIN (tmp) = gimplify_ctxp->temps; gimplify_ctxp->temps = tmp; } else if (cfun) record_vars (tmp); else { gimple_seq body_seq; /* This case is for nested functions. We need to expose the locals they create. */ body_seq = gimple_body (current_function_decl); declare_vars (tmp, gimple_seq_first_stmt (body_seq), false); } } /* Determine whether to assign a location to the statement GS. */ static bool should_carry_location_p (gimple gs) { /* Don't emit a line note for a label. We particularly don't want to emit one for the break label, since it doesn't actually correspond to the beginning of the loop/switch. */ if (gimple_code (gs) == GIMPLE_LABEL) return false; return true; } /* Return true if a location should not be emitted for this statement by annotate_one_with_location. */ static inline bool gimple_do_not_emit_location_p (gimple g) { return gimple_plf (g, GF_PLF_1); } /* Mark statement G so a location will not be emitted by annotate_one_with_location. 
*/ static inline void gimple_set_do_not_emit_location (gimple g) { /* The PLF flags are initialized to 0 when a new tuple is created, so no need to initialize it anywhere. */ gimple_set_plf (g, GF_PLF_1, true); } /* Set the location for gimple statement GS to LOCATION. */ static void annotate_one_with_location (gimple gs, location_t location) { if (!gimple_has_location (gs) && !gimple_do_not_emit_location_p (gs) && should_carry_location_p (gs)) gimple_set_location (gs, location); } /* Set LOCATION for all the statements after iterator GSI in sequence SEQ. If GSI is pointing to the end of the sequence, start with the first statement in SEQ. */ static void annotate_all_with_location_after (gimple_seq seq, gimple_stmt_iterator gsi, location_t location) { if (gsi_end_p (gsi)) gsi = gsi_start (seq); else gsi_next (&gsi); for (; !gsi_end_p (gsi); gsi_next (&gsi)) annotate_one_with_location (gsi_stmt (gsi), location); } /* Set the location for all the statements in a sequence STMT_P to LOCATION. */ void annotate_all_with_location (gimple_seq stmt_p, location_t location) { gimple_stmt_iterator i; if (gimple_seq_empty_p (stmt_p)) return; for (i = gsi_start (stmt_p); !gsi_end_p (i); gsi_next (&i)) { gimple gs = gsi_stmt (i); annotate_one_with_location (gs, location); } } /* This page contains routines to unshare tree nodes, i.e. to duplicate tree nodes that are referenced more than once in GENERIC functions. This is necessary because gimplification (translation into GIMPLE) is performed by modifying tree nodes in-place, so gimplification of a shared node in a first context could generate an invalid GIMPLE form in a second context. This is achieved with a simple mark/copy/unmark algorithm that walks the GENERIC representation top-down, marks nodes with TREE_VISITED the first time it encounters them, duplicates them if they already have TREE_VISITED set, and finally removes the TREE_VISITED marks it has set. The algorithm works only at the function level, i.e. it generates a GENERIC representation of a function with no nodes shared within the function when passed a GENERIC function (except for nodes that are allowed to be shared). At the global level, it is also necessary to unshare tree nodes that are referenced in more than one function, for the same aforementioned reason. This requires some cooperation from the front-end. There are 2 strategies: 1. Manual unsharing. The front-end needs to call unshare_expr on every expression that might end up being shared across functions. 2. Deep unsharing. This is an extension of regular unsharing. Instead of calling unshare_expr on expressions that might be shared across functions, the front-end pre-marks them with TREE_VISITED. This will ensure that they are unshared on the first reference within functions when the regular unsharing algorithm runs. The counterpart is that this algorithm must look deeper than for manual unsharing, which is specified by LANG_HOOKS_DEEP_UNSHARING. If there are only a few specific cases of node sharing across functions, it is probably easier for a front-end to unshare the expressions manually. On the contrary, if the expressions generated at the global level are as widespread as expressions generated within functions, deep unsharing is very likely the way to go. */ /* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes. These nodes model computations that must be done once. If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. 
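Concretely, SAVE_EXPR (i++) means 'evaluate i++ once and reuse the result everywhere this node appears'; if the unsharing walk copied the node, an expression such as 'SAVE_EXPR (i++) + SAVE_EXPR (i++)' built from one shared node would decay into two independent increments of i.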
However, if DATA is non-null, it must hold a pointer set that is used to unshare the subtrees of these nodes. */ static tree mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; enum tree_code code = TREE_CODE (t); /* Do not copy SAVE_EXPR, TARGET_EXPR or BIND_EXPR nodes themselves, but copy their subtrees if we can make sure to do it only once. */ if (code == SAVE_EXPR || code == TARGET_EXPR || code == BIND_EXPR) { if (data && !pointer_set_insert ((struct pointer_set_t *)data, t)) ; else *walk_subtrees = 0; } /* Stop at types, decls, constants like copy_tree_r. */ else if (TREE_CODE_CLASS (code) == tcc_type || TREE_CODE_CLASS (code) == tcc_declaration || TREE_CODE_CLASS (code) == tcc_constant /* We can't do anything sensible with a BLOCK used as an expression, but we also can't just die when we see it because of non-expression uses. So we avert our eyes and cross our fingers. Silly Java. */ || code == BLOCK) *walk_subtrees = 0; /* Cope with the statement expression extension. */ else if (code == STATEMENT_LIST) ; /* Leave the bulk of the work to copy_tree_r itself. */ else copy_tree_r (tp, walk_subtrees, NULL); return NULL_TREE; } /* Callback for walk_tree to unshare most of the shared trees rooted at *TP. If *TP has been visited already, then *TP is deeply copied by calling mostly_copy_tree_r. DATA is passed to mostly_copy_tree_r unmodified. */ static tree copy_if_shared_r (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; enum tree_code code = TREE_CODE (t); /* Skip types, decls, and constants. But we do want to look at their types and the bounds of types. Mark them as visited so we properly unmark their subtrees on the unmark pass. If we've already seen them, don't look down further. */ if (TREE_CODE_CLASS (code) == tcc_type || TREE_CODE_CLASS (code) == tcc_declaration || TREE_CODE_CLASS (code) == tcc_constant) { if (TREE_VISITED (t)) *walk_subtrees = 0; else TREE_VISITED (t) = 1; } /* If this node has been visited already, unshare it and don't look any deeper. */ else if (TREE_VISITED (t)) { walk_tree (tp, mostly_copy_tree_r, data, NULL); *walk_subtrees = 0; } /* Otherwise, mark the node as visited and keep looking. */ else TREE_VISITED (t) = 1; return NULL_TREE; } /* Unshare most of the shared trees rooted at *TP. DATA is passed to the copy_if_shared_r callback unmodified. */ static inline void copy_if_shared (tree *tp, void *data) { walk_tree (tp, copy_if_shared_r, data, NULL); } /* Unshare all the trees in the body of FNDECL, as well as in the bodies of any nested functions. */ static void unshare_body (tree fndecl) { struct cgraph_node *cgn = cgraph_get_node (fndecl); /* If the language requires deep unsharing, we need a pointer set to make sure we don't repeatedly unshare subtrees of unshareable nodes. */ struct pointer_set_t *visited = lang_hooks.deep_unsharing ? pointer_set_create () : NULL; copy_if_shared (&DECL_SAVED_TREE (fndecl), visited); copy_if_shared (&DECL_SIZE (DECL_RESULT (fndecl)), visited); copy_if_shared (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)), visited); if (visited) pointer_set_destroy (visited); if (cgn) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unshare_body (cgn->decl); } /* Callback for walk_tree to unmark the visited trees rooted at *TP. Subtrees are walked until the first unvisited node is encountered. */ static tree unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { tree t = *tp; /* If this node has been visited, unmark it and keep looking. 
*/ if (TREE_VISITED (t)) TREE_VISITED (t) = 0; /* Otherwise, don't look any deeper. */ else *walk_subtrees = 0; return NULL_TREE; } /* Unmark the visited trees rooted at *TP. */ static inline void unmark_visited (tree *tp) { walk_tree (tp, unmark_visited_r, NULL, NULL); } /* Likewise, but mark all trees as not visited. */ static void unvisit_body (tree fndecl) { struct cgraph_node *cgn = cgraph_get_node (fndecl); unmark_visited (&DECL_SAVED_TREE (fndecl)); unmark_visited (&DECL_SIZE (DECL_RESULT (fndecl))); unmark_visited (&DECL_SIZE_UNIT (DECL_RESULT (fndecl))); if (cgn) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unvisit_body (cgn->decl); } /* Unconditionally make an unshared copy of EXPR. This is used when using stored expressions which span multiple functions, such as BINFO_VTABLE, as the normal unsharing process can't tell that they're shared. */ tree unshare_expr (tree expr) { walk_tree (&expr, mostly_copy_tree_r, NULL, NULL); return expr; } /* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both contain statements and have a value. Assign its value to a temporary and give it void_type_node. Return the temporary, or NULL_TREE if WRAPPER was already void. */ tree voidify_wrapper_expr (tree wrapper, tree temp) { tree type = TREE_TYPE (wrapper); if (type && !VOID_TYPE_P (type)) { tree *p; /* Set p to point to the body of the wrapper. Loop until we find something that isn't a wrapper. */ for (p = &wrapper; p && *p; ) { switch (TREE_CODE (*p)) { case BIND_EXPR: TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; /* For a BIND_EXPR, the body is operand 1. */ p = &BIND_EXPR_BODY (*p); break; case CLEANUP_POINT_EXPR: case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; p = &TREE_OPERAND (*p, 0); break; case STATEMENT_LIST: { tree_stmt_iterator i = tsi_last (*p); TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i); } break; case COMPOUND_EXPR: /* Advance to the last statement. Set all container types to void. */ for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1)) { TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; } break; default: /* Assume that any tree upon which voidify_wrapper_expr is directly called is a wrapper, and that its body is op0. */ if (p == &wrapper) { TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; p = &TREE_OPERAND (*p, 0); break; } goto out; } } out: if (p == NULL || IS_EMPTY_STMT (*p)) temp = NULL_TREE; else if (temp) { /* The wrapper is on the RHS of an assignment that we're pushing down. */ gcc_assert (TREE_CODE (temp) == INIT_EXPR || TREE_CODE (temp) == MODIFY_EXPR); TREE_OPERAND (temp, 1) = *p; *p = temp; } else { temp = create_tmp_var (type, "retval"); *p = build2 (INIT_EXPR, type, temp, *p); } return temp; } return NULL_TREE; } /* Prepare calls to builtins to SAVE and RESTORE the stack as well as a temporary through which they communicate. */ static void build_stack_save_restore (gimple *save, gimple *restore) { tree tmp_var; *save = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0); tmp_var = create_tmp_var (ptr_type_node, "saved_stack"); gimple_call_set_lhs (*save, tmp_var); *restore = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE), 1, tmp_var); } /* Gimplify a BIND_EXPR. Just voidify and recurse. 
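When the BIND_EXPR yields a value, as in the GNU statement expression 'x = ({ int t = f (); t; })', voidify_wrapper_expr rewrites the body so it ends in an assignment to a fresh 'retval' temporary, gives the BIND_EXPR void type, and hands 'retval' back as the replacement expression.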
*/ static enum gimplify_status gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p) { tree bind_expr = *expr_p; bool old_save_stack = gimplify_ctxp->save_stack; tree t; gimple gimple_bind; gimple_seq body, cleanup; gimple stack_save; tree temp = voidify_wrapper_expr (bind_expr, NULL); /* Mark variables seen in this bind expr. */ for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t)) { if (TREE_CODE (t) == VAR_DECL) { DECL_SEEN_IN_BIND_EXPR_P (t) = 1; if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun) cfun->has_local_explicit_reg_vars = true; } /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. We'll transform their uses as we find them. */ if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE) && !TREE_THIS_VOLATILE (t) && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t)) && !needs_to_live_in_memory (t)) DECL_GIMPLE_REG_P (t) = 1; } gimple_bind = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL, BIND_EXPR_BLOCK (bind_expr)); gimple_push_bind_expr (gimple_bind); gimplify_ctxp->save_stack = false; /* Gimplify the body into the GIMPLE_BIND tuple's body. */ body = NULL; gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body); gimple_bind_set_body (gimple_bind, body); cleanup = NULL; stack_save = NULL; if (gimplify_ctxp->save_stack) { gimple stack_restore; /* Save stack on entry and restore it on exit. Add a try_finally block to achieve this. Note that mudflap depends on the format of the emitted code: see mx_register_decls(). */ build_stack_save_restore (&stack_save, &stack_restore); gimplify_seq_add_stmt (&cleanup, stack_restore); } /* Add clobbers for all variables that go out of scope. */ for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t)) { if (TREE_CODE (t) == VAR_DECL && !is_global_var (t) && DECL_CONTEXT (t) == current_function_decl && !DECL_HARD_REGISTER (t) && !TREE_THIS_VOLATILE (t) && !DECL_HAS_VALUE_EXPR_P (t) /* Only care for variables that have to be in memory. Others will be rewritten into SSA names, hence moved to the top-level. */ && !is_gimple_reg (t)) { tree clobber = build_constructor (TREE_TYPE (t), NULL); TREE_THIS_VOLATILE (clobber) = 1; gimplify_seq_add_stmt (&cleanup, gimple_build_assign (t, clobber)); } } if (cleanup) { gimple gs; gimple_seq new_body; new_body = NULL; gs = gimple_build_try (gimple_bind_body (gimple_bind), cleanup, GIMPLE_TRY_FINALLY); if (stack_save) gimplify_seq_add_stmt (&new_body, stack_save); gimplify_seq_add_stmt (&new_body, gs); gimple_bind_set_body (gimple_bind, new_body); } gimplify_ctxp->save_stack = old_save_stack; gimple_pop_bind_expr (); gimplify_seq_add_stmt (pre_p, gimple_bind); if (temp) { *expr_p = temp; return GS_OK; } *expr_p = NULL_TREE; return GS_ALL_DONE; } /* Gimplify a RETURN_EXPR. If the expression to be returned is not a GIMPLE value, it is assigned to a new temporary and the statement is re-written to return the temporary. PRE_P points to the sequence where side effects that must happen before STMT should be stored. 
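For a function whose value is returned in registers, 'return f () + 1;' is therefore rewritten along the lines of 'retval.1 = f () + 1; return retval.1;' (name hypothetical), and the single return_temp in the gimplify context is reused by every return statement in the function.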
*/ static enum gimplify_status gimplify_return_expr (tree stmt, gimple_seq *pre_p) { gimple ret; tree ret_expr = TREE_OPERAND (stmt, 0); tree result_decl, result; if (ret_expr == error_mark_node) return GS_ERROR; if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL || ret_expr == error_mark_node) { gimple ret = gimple_build_return (ret_expr); gimple_set_no_warning (ret, TREE_NO_WARNING (stmt)); gimplify_seq_add_stmt (pre_p, ret); return GS_ALL_DONE; } if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl)))) result_decl = NULL_TREE; else { result_decl = TREE_OPERAND (ret_expr, 0); /* See through a return by reference. */ if (TREE_CODE (result_decl) == INDIRECT_REF) result_decl = TREE_OPERAND (result_decl, 0); gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR || TREE_CODE (ret_expr) == INIT_EXPR) && TREE_CODE (result_decl) == RESULT_DECL); } /* If aggregate_value_p is true, then we can return the bare RESULT_DECL. Recall that aggregate_value_p is FALSE for any aggregate type that is returned in registers. If we're returning values in registers, then we don't want to extend the lifetime of the RESULT_DECL, particularly across another call. In addition, for those aggregates for which hard_function_value generates a PARALLEL, we'll die during normal expansion of structure assignments; there's special code in expand_return to handle this case that does not exist in expand_expr. */ if (!result_decl) result = NULL_TREE; else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl))) { if (TREE_CODE (DECL_SIZE (result_decl)) != INTEGER_CST) { if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl))) gimplify_type_sizes (TREE_TYPE (result_decl), pre_p); /* Note that we don't use gimplify_vla_decl because the RESULT_DECL should be effectively allocated by the caller, i.e. all calls to this function must be subject to the Return Slot Optimization. */ gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p); gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p); } result = result_decl; } else if (gimplify_ctxp->return_temp) result = gimplify_ctxp->return_temp; else { result = create_tmp_reg (TREE_TYPE (result_decl), NULL); /* ??? With complex control flow (usually involving abnormal edges), we can wind up warning about an uninitialized value for this. Due to how this variable is constructed and initialized, this is never true. Give up and never warn. */ TREE_NO_WARNING (result) = 1; gimplify_ctxp->return_temp = result; } /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use. Then gimplify the whole thing. */ if (result != result_decl) TREE_OPERAND (ret_expr, 0) = result; gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p); ret = gimple_build_return (result); gimple_set_no_warning (ret, TREE_NO_WARNING (stmt)); gimplify_seq_add_stmt (pre_p, ret); return GS_ALL_DONE; } /* Gimplify a variable-length array DECL. */ static void gimplify_vla_decl (tree decl, gimple_seq *seq_p) { /* This is a variable-sized decl. Simplify its size and mark it for deferred expansion. Note that mudflap depends on the format of the emitted code: see mx_register_decls(). */ tree t, addr, ptr_type; gimplify_one_sizepos (&DECL_SIZE (decl), seq_p); gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p); /* All occurrences of this decl in final gimplified code will be replaced by indirection. Setting DECL_VALUE_EXPR does two things: First, it lets the rest of the gimplifier know what replacement to use. Second, it lets the debug info know where to find the value. 
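Roughly, for 'int a[n];' we end up with a hypothetical pointer a.ptr such that 'a.ptr = __builtin_alloca_with_align (n * sizeof (int), align)' is emitted at the declaration point, and every subsequent use of 'a' rewrites to '*a.ptr' through the DECL_VALUE_EXPR installed below.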
*/ ptr_type = build_pointer_type (TREE_TYPE (decl)); addr = create_tmp_var (ptr_type, get_name (decl)); DECL_IGNORED_P (addr) = 0; t = build_fold_indirect_ref (addr); TREE_THIS_NOTRAP (t) = 1; SET_DECL_VALUE_EXPR (decl, t); DECL_HAS_VALUE_EXPR_P (decl) = 1; t = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN); t = build_call_expr (t, 2, DECL_SIZE_UNIT (decl), size_int (DECL_ALIGN (decl))); /* The call has been built for a variable-sized object. */ CALL_ALLOCA_FOR_VAR_P (t) = 1; t = fold_convert (ptr_type, t); t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t); gimplify_and_add (t, seq_p); /* Indicate that we need to restore the stack level when the enclosing BIND_EXPR is exited. */ gimplify_ctxp->save_stack = true; } /* Gimplify a DECL_EXPR node *STMT_P by making any necessary allocation and initialization explicit. */ static enum gimplify_status gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p) { tree stmt = *stmt_p; tree decl = DECL_EXPR_DECL (stmt); *stmt_p = NULL_TREE; if (TREE_TYPE (decl) == error_mark_node) return GS_ERROR; if ((TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == VAR_DECL) && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl))) gimplify_type_sizes (TREE_TYPE (decl), seq_p); /* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified in case its size expressions contain problematic nodes like CALL_EXPR. */ if (TREE_CODE (decl) == TYPE_DECL && DECL_ORIGINAL_TYPE (decl) && !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl))) gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p); if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl)) { tree init = DECL_INITIAL (decl); if (TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST || (!TREE_STATIC (decl) && flag_stack_check == GENERIC_STACK_CHECK && compare_tree_int (DECL_SIZE_UNIT (decl), STACK_CHECK_MAX_VAR_SIZE) > 0)) gimplify_vla_decl (decl, seq_p); /* Some front ends do not explicitly declare all anonymous artificial variables. We compensate here by declaring the variables, though it would be better if the front ends would explicitly declare them. */ if (!DECL_SEEN_IN_BIND_EXPR_P (decl) && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE) gimple_add_tmp_var (decl); if (init && init != error_mark_node) { if (!TREE_STATIC (decl)) { DECL_INITIAL (decl) = NULL_TREE; init = build2 (INIT_EXPR, void_type_node, decl, init); gimplify_and_add (init, seq_p); ggc_free (init); } else /* We must still examine initializers for static variables as they may contain a label address. */ walk_tree (&init, force_labels_r, NULL, NULL); } } return GS_ALL_DONE; } /* Gimplify a LOOP_EXPR. Normally this just involves gimplifying the body and replacing the LOOP_EXPR with goto, but if the loop contains an EXIT_EXPR, we need to append a label for it to jump to. */ static enum gimplify_status gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p) { tree saved_label = gimplify_ctxp->exit_label; tree start_label = create_artificial_label (UNKNOWN_LOCATION); gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label)); gimplify_ctxp->exit_label = NULL_TREE; gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p); gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label)); if (gimplify_ctxp->exit_label) gimplify_seq_add_stmt (pre_p, gimple_build_label (gimplify_ctxp->exit_label)); gimplify_ctxp->exit_label = saved_label; *expr_p = NULL; return GS_ALL_DONE; } /* Gimplify a statement list onto a sequence. These may be created either by an enlightened front-end, or by shortcut_cond_expr. 
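The lowering is a straight drain: each statement in the list is gimplified into PRE_P in order and then delinked, and if the list as a whole yields a value, voidify_wrapper_expr has already captured it in a temporary that becomes the replacement expression.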
*/ static enum gimplify_status gimplify_statement_list (tree *expr_p, gimple_seq *pre_p) { tree temp = voidify_wrapper_expr (*expr_p, NULL); tree_stmt_iterator i = tsi_start (*expr_p); while (!tsi_end_p (i)) { gimplify_stmt (tsi_stmt_ptr (i), pre_p); tsi_delink (&i); } if (temp) { *expr_p = temp; return GS_OK; } return GS_ALL_DONE; } /* Compare two case labels. Because the front end should already have made sure that case ranges do not overlap, it is enough to only compare the CASE_LOW values of each case label. */ static int compare_case_labels (const void *p1, const void *p2) { const_tree const case1 = *(const_tree const*)p1; const_tree const case2 = *(const_tree const*)p2; /* The 'default' case label always goes first. */ if (!CASE_LOW (case1)) return -1; else if (!CASE_LOW (case2)) return 1; else return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2)); } /* Sort the case labels in LABEL_VEC in place in ascending order. */ void sort_case_labels (VEC(tree,heap)* label_vec) { VEC_qsort (tree, label_vec, compare_case_labels); } /* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can branch to. */ static enum gimplify_status gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p) { tree switch_expr = *expr_p; gimple_seq switch_body_seq = NULL; enum gimplify_status ret; ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val, fb_rvalue); if (ret == GS_ERROR || ret == GS_UNHANDLED) return ret; if (SWITCH_BODY (switch_expr)) { VEC (tree,heap) *labels; VEC (tree,heap) *saved_labels; tree default_case = NULL_TREE; size_t i, len; gimple gimple_switch; /* If someone can be bothered to fill in the labels, they can be bothered to null out the body too. */ gcc_assert (!SWITCH_LABELS (switch_expr)); /* save old labels, get new ones from body, then restore the old labels. Save all the things from the switch body to append after. */ saved_labels = gimplify_ctxp->case_labels; gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8); gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq); labels = gimplify_ctxp->case_labels; gimplify_ctxp->case_labels = saved_labels; i = 0; while (i < VEC_length (tree, labels)) { tree elt = VEC_index (tree, labels, i); tree low = CASE_LOW (elt); bool remove_element = FALSE; if (low) { /* Discard empty ranges. */ tree high = CASE_HIGH (elt); if (high && tree_int_cst_lt (high, low)) remove_element = TRUE; } else { /* The default case must be the last label in the list. */ gcc_assert (!default_case); default_case = elt; remove_element = TRUE; } if (remove_element) VEC_ordered_remove (tree, labels, i); else i++; } len = i; if (!VEC_empty (tree, labels)) sort_case_labels (labels); if (!default_case) { tree type = TREE_TYPE (switch_expr); /* If the switch has no default label, add one, so that we jump around the switch body. If the labels already cover the whole range of type, add the default label pointing to one of the existing labels. 
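For instance, a switch on an 'unsigned char' whose case labels cover 0 through 255 without holes gets a default label that simply aliases the first existing case label; otherwise a fresh, empty default label is appended after the switch body so that the default case just falls past it.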
*/ if (type == void_type_node) type = TREE_TYPE (SWITCH_COND (switch_expr)); if (len && INTEGRAL_TYPE_P (type) && TYPE_MIN_VALUE (type) && TYPE_MAX_VALUE (type) && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)), TYPE_MIN_VALUE (type))) { tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1)); if (!high) high = CASE_LOW (VEC_index (tree, labels, len - 1)); if (tree_int_cst_equal (high, TYPE_MAX_VALUE (type))) { for (i = 1; i < len; i++) { high = CASE_LOW (VEC_index (tree, labels, i)); low = CASE_HIGH (VEC_index (tree, labels, i - 1)); if (!low) low = CASE_LOW (VEC_index (tree, labels, i - 1)); if ((TREE_INT_CST_LOW (low) + 1 != TREE_INT_CST_LOW (high)) || (TREE_INT_CST_HIGH (low) + (TREE_INT_CST_LOW (high) == 0) != TREE_INT_CST_HIGH (high))) break; } if (i == len) { tree label = CASE_LABEL (VEC_index (tree, labels, 0)); default_case = build_case_label (NULL_TREE, NULL_TREE, label); } } } if (!default_case) { gimple new_default; default_case = build_case_label (NULL_TREE, NULL_TREE, create_artificial_label (UNKNOWN_LOCATION)); new_default = gimple_build_label (CASE_LABEL (default_case)); gimplify_seq_add_stmt (&switch_body_seq, new_default); } } gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr), default_case, labels); gimplify_seq_add_stmt (pre_p, gimple_switch); gimplify_seq_add_seq (pre_p, switch_body_seq); VEC_free(tree, heap, labels); } else gcc_assert (SWITCH_LABELS (switch_expr)); return GS_ALL_DONE; } /* Gimplify the CASE_LABEL_EXPR pointed to by EXPR_P. */ static enum gimplify_status gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p) { struct gimplify_ctx *ctxp; gimple gimple_label; /* Invalid OpenMP programs can play Duff's Device type games with #pragma omp parallel. At least in the C front end, we don't detect such invalid branches until after gimplification. */ for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context) if (ctxp->case_labels) break; gimple_label = gimple_build_label (CASE_LABEL (*expr_p)); VEC_safe_push (tree, heap, ctxp->case_labels, *expr_p); gimplify_seq_add_stmt (pre_p, gimple_label); return GS_ALL_DONE; } /* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first if necessary. */ tree build_and_jump (tree *label_p) { if (label_p == NULL) /* If there's nowhere to jump, just fall through. */ return NULL_TREE; if (*label_p == NULL_TREE) { tree label = create_artificial_label (UNKNOWN_LOCATION); *label_p = label; } return build1 (GOTO_EXPR, void_type_node, *label_p); } /* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR. This also involves building a label to jump to and communicating it to gimplify_loop_expr through gimplify_ctxp->exit_label. */ static enum gimplify_status gimplify_exit_expr (tree *expr_p) { tree cond = TREE_OPERAND (*expr_p, 0); tree expr; expr = build_and_jump (&gimplify_ctxp->exit_label); expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE); *expr_p = expr; return GS_OK; } /* A helper function to be called via walk_tree. Mark all labels under *TP as being forced. To be called for DECL_INITIAL of static variables. */ tree force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { if (TYPE_P (*tp)) *walk_subtrees = 0; if (TREE_CODE (*tp) == LABEL_DECL) FORCED_LABEL (*tp) = 1; return NULL_TREE; } /* *EXPR_P is a COMPONENT_REF being used as an rvalue. If its type is different from its canonical type, wrap the whole thing inside a NOP_EXPR and force the type of the COMPONENT_REF to be the canonical type. 
The canonical type of a COMPONENT_REF is the type of the field being referenced--unless the field is a bit-field which can be read directly in a smaller mode, in which case the canonical type is the sign-appropriate type corresponding to that mode. */ static void canonicalize_component_ref (tree *expr_p) { tree expr = *expr_p; tree type; gcc_assert (TREE_CODE (expr) == COMPONENT_REF); if (INTEGRAL_TYPE_P (TREE_TYPE (expr))) type = TREE_TYPE (get_unwidened (expr, NULL_TREE)); else type = TREE_TYPE (TREE_OPERAND (expr, 1)); /* One could argue that all the stuff below is not necessary for the non-bitfield case and declare it a FE error if type adjustment would be needed. */ if (TREE_TYPE (expr) != type) { #ifdef ENABLE_TYPES_CHECKING tree old_type = TREE_TYPE (expr); #endif int type_quals; /* We need to preserve qualifiers and propagate them from operand 0. */ type_quals = TYPE_QUALS (type) | TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0))); if (TYPE_QUALS (type) != type_quals) type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals); /* Set the type of the COMPONENT_REF to the underlying type. */ TREE_TYPE (expr) = type; #ifdef ENABLE_TYPES_CHECKING /* It is now a FE error, if the conversion from the canonical type to the original expression type is not useless. */ gcc_assert (useless_type_conversion_p (old_type, type)); #endif } } /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR by converting T array[U]; (T *)&array ==> &array[L] where L is the lower bound. For simplicity, only do this for constant lower bound. The constraint is that the type of &array[L] is trivially convertible to T *. */ static void canonicalize_addr_expr (tree *expr_p) { tree expr = *expr_p; tree addr_expr = TREE_OPERAND (expr, 0); tree datype, ddatype, pddatype; /* We simplify only conversions from an ADDR_EXPR to a pointer type. */ if (!POINTER_TYPE_P (TREE_TYPE (expr)) || TREE_CODE (addr_expr) != ADDR_EXPR) return; /* The addr_expr type should be a pointer to an array. */ datype = TREE_TYPE (TREE_TYPE (addr_expr)); if (TREE_CODE (datype) != ARRAY_TYPE) return; /* The pointer to element type shall be trivially convertible to the expression pointer type. */ ddatype = TREE_TYPE (datype); pddatype = build_pointer_type (ddatype); if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)), pddatype)) return; /* The lower bound and element sizes must be constant. */ if (!TYPE_SIZE_UNIT (ddatype) || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype)) || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST) return; /* All checks succeeded. Build a new node to merge the cast. */ *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0), TYPE_MIN_VALUE (TYPE_DOMAIN (datype)), NULL_TREE, NULL_TREE); *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p); /* We can have stripped a required restrict qualifier above. */ if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p))) *expr_p = fold_convert (TREE_TYPE (expr), *expr_p); } /* *EXPR_P is a NOP_EXPR or CONVERT_EXPR. Remove it and/or other conversions underneath as appropriate. */ static enum gimplify_status gimplify_conversion (tree *expr_p) { location_t loc = EXPR_LOCATION (*expr_p); gcc_assert (CONVERT_EXPR_P (*expr_p)); /* Then strip away all but the outermost conversion. */ STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0)); /* And remove the outermost conversion if it's useless. 
*/ if (tree_ssa_useless_type_conversion (*expr_p)) *expr_p = TREE_OPERAND (*expr_p, 0); /* If we still have a conversion at the toplevel, then canonicalize some constructs. */ if (CONVERT_EXPR_P (*expr_p)) { tree sub = TREE_OPERAND (*expr_p, 0); /* If a NOP conversion is changing the type of a COMPONENT_REF expression, then canonicalize its type now in order to expose more redundant conversions. */ if (TREE_CODE (sub) == COMPONENT_REF) canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0)); /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR. */ else if (TREE_CODE (sub) == ADDR_EXPR) canonicalize_addr_expr (expr_p); } /* If we have a conversion to a non-register type force the use of a VIEW_CONVERT_EXPR instead. */ if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p))) *expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0)); return GS_OK; } /* Nonlocal VLAs seen in the current function. */ static struct pointer_set_t *nonlocal_vlas; /* Gimplify a VAR_DECL or PARM_DECL. Return GS_OK if we expanded a DECL_VALUE_EXPR, and it's worth re-examining things. */ static enum gimplify_status gimplify_var_or_parm_decl (tree *expr_p) { tree decl = *expr_p; /* ??? If this is a local variable, and it has not been seen in any outer BIND_EXPR, then it's probably the result of a duplicate declaration, for which we've already issued an error. It would be really nice if the front end wouldn't leak these at all. Currently the only known culprit is C++ destructors, as seen in g++.old-deja/g++.jason/binding.C. */ if (TREE_CODE (decl) == VAR_DECL && !DECL_SEEN_IN_BIND_EXPR_P (decl) && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl) && decl_function_context (decl) == current_function_decl) { gcc_assert (seen_error ()); return GS_ERROR; } /* If the decl is an alias for another expression, substitute it now. */ if (DECL_HAS_VALUE_EXPR_P (decl)) { tree value_expr = DECL_VALUE_EXPR (decl); *expr_p = unshare_expr (value_expr); return GS_OK; } return GS_ALL_DONE; } /* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR node *EXPR_P. compound_lval : min_lval '[' val ']' | min_lval '.' ID | compound_lval '[' val ']' | compound_lval '.' ID This is not part of the original SIMPLE definition, which separates array and member references, but it seems reasonable to handle them together. Also, this way we don't run into problems with union aliasing; gcc requires that for accesses through a union to alias, the union reference must be explicit, which was not always the case when we were splitting up array and member refs. PRE_P points to the sequence where side effects that must happen before *EXPR_P should be stored. POST_P points to the sequence where side effects that must happen after *EXPR_P should be stored. */ static enum gimplify_status gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, fallback_t fallback) { tree *p; VEC(tree,heap) *stack; enum gimplify_status ret = GS_ALL_DONE, tret; int i; location_t loc = EXPR_LOCATION (*expr_p); tree expr = *expr_p; /* Create a stack of the subexpressions so later we can walk them in order from inner to outer. */ stack = VEC_alloc (tree, heap, 10); /* We can handle anything that get_inner_reference can deal with. */ for (p = expr_p; ; p = &TREE_OPERAND (*p, 0)) { restart: /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs. 
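For example, '*&a[i]' is folded straight back to 'a[i]', so the compound-lvalue walk below sees an ARRAY_REF rather than an opaque pointer dereference.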
*/ if (TREE_CODE (*p) == INDIRECT_REF) *p = fold_indirect_ref_loc (loc, *p); if (handled_component_p (*p)) ; /* Expand DECL_VALUE_EXPR now. In some cases that may expose additional COMPONENT_REFs. */ else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL) && gimplify_var_or_parm_decl (p) == GS_OK) goto restart; else break; VEC_safe_push (tree, heap, stack, *p); } gcc_assert (VEC_length (tree, stack)); /* Now STACK is a stack of pointers to all the refs we've walked through and P points to the innermost expression. Java requires that we elaborated nodes in source order. That means we must gimplify the inner expression followed by each of the indices, in order. But we can't gimplify the inner expression until we deal with any variable bounds, sizes, or positions in order to deal with PLACEHOLDER_EXPRs. So we do this in three steps. First we deal with the annotations for any variables in the components, then we gimplify the base, then we gimplify any indices, from left to right. */ for (i = VEC_length (tree, stack) - 1; i >= 0; i--) { tree t = VEC_index (tree, stack, i); if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { /* Gimplify the low bound and element type size and put them into the ARRAY_REF. If these values are set, they have already been gimplified. */ if (TREE_OPERAND (t, 2) == NULL_TREE) { tree low = unshare_expr (array_ref_low_bound (t)); if (!is_gimple_min_invariant (low)) { TREE_OPERAND (t, 2) = low; tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else { tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } if (TREE_OPERAND (t, 3) == NULL_TREE) { tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))); tree elmt_size = unshare_expr (array_ref_element_size (t)); tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type)); /* Divide the element size by the alignment of the element type (above). */ elmt_size = size_binop_loc (loc, EXACT_DIV_EXPR, elmt_size, factor); if (!is_gimple_min_invariant (elmt_size)) { TREE_OPERAND (t, 3) = elmt_size; tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else { tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else if (TREE_CODE (t) == COMPONENT_REF) { /* Set the field offset into T and gimplify it. */ if (TREE_OPERAND (t, 2) == NULL_TREE) { tree offset = unshare_expr (component_ref_field_offset (t)); tree field = TREE_OPERAND (t, 1); tree factor = size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT); /* Divide the offset by its alignment. */ offset = size_binop_loc (loc, EXACT_DIV_EXPR, offset, factor); if (!is_gimple_min_invariant (offset)) { TREE_OPERAND (t, 2) = offset; tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else { tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } } /* Step 2 is to gimplify the base expression. Make sure lvalue is set so as to match the min_lval predicate. Failure to do so may result in the creation of large aggregate temporaries. */ tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback | fb_lvalue); ret = MIN (ret, tret); /* And finally, the indices and operands to BIT_FIELD_REF. During this loop we also remove any useless conversions. 
*/ for (; VEC_length (tree, stack) > 0; ) { tree t = VEC_pop (tree, stack); if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { /* Gimplify the dimension. */ if (!is_gimple_min_invariant (TREE_OPERAND (t, 1))) { tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); } } else if (TREE_CODE (t) == BIT_FIELD_REF) { tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); } STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0)); /* The innermost expression P may have originally had TREE_SIDE_EFFECTS set which would have caused all the outer expressions in *EXPR_P leading to P to also have had TREE_SIDE_EFFECTS set. */ recalculate_side_effects (t); } /* If the outermost expression is a COMPONENT_REF, canonicalize its type. */ if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF) { canonicalize_component_ref (expr_p); } VEC_free (tree, heap, stack); gcc_assert (*expr_p == expr || ret != GS_ALL_DONE); return ret; } /* Gimplify the self modifying expression pointed to by EXPR_P (++, --, +=, -=). PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. WANT_VALUE is nonzero iff we want to use the value of this expression in another expression. */ static enum gimplify_status gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, bool want_value) { enum tree_code code; tree lhs, lvalue, rhs, t1; gimple_seq post = NULL, *orig_post_p = post_p; bool postfix; enum tree_code arith_code; enum gimplify_status ret; location_t loc = EXPR_LOCATION (*expr_p); code = TREE_CODE (*expr_p); gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR); /* Prefix or postfix? */ if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR) /* Faster to treat as prefix if result is not used. */ postfix = want_value; else postfix = false; /* For postfix, make sure the inner expression's post side effects are executed after side effects from this expression. */ if (postfix) post_p = &post; /* Add or subtract? */ if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) arith_code = PLUS_EXPR; else arith_code = MINUS_EXPR; /* Gimplify the LHS into a GIMPLE lvalue. */ lvalue = TREE_OPERAND (*expr_p, 0); ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret == GS_ERROR) return ret; /* Extract the operands to the arithmetic operation. */ lhs = lvalue; rhs = TREE_OPERAND (*expr_p, 1); /* For postfix operator, we evaluate the LHS to an rvalue and then use that as the result value and in the postqueue operation. We also make sure to make lvalue a minimal lval, see gcc.c-torture/execute/20040313-1.c for an example where this matters. */ if (postfix) { if (!is_gimple_min_lval (lvalue)) { mark_addressable (lvalue); lvalue = build_fold_addr_expr_loc (input_location, lvalue); gimplify_expr (&lvalue, pre_p, post_p, is_gimple_val, fb_rvalue); lvalue = build_fold_indirect_ref_loc (input_location, lvalue); } ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue); if (ret == GS_ERROR) return ret; } /* For POINTERs increment, use POINTER_PLUS_EXPR. 
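GIMPLE has no pointer-minus code, so a decrement like 'p--' on an 'int *p' also becomes a POINTER_PLUS_EXPR: the operand is converted to the pointer-offset type and negated, yielding roughly 'p = p + -4' on a target with 4-byte int (pointer offsets are in bytes).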
*/ if (POINTER_TYPE_P (TREE_TYPE (lhs))) { rhs = convert_to_ptrofftype_loc (loc, rhs); if (arith_code == MINUS_EXPR) rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs); arith_code = POINTER_PLUS_EXPR; } t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs); if (postfix) { gimplify_assign (lvalue, t1, orig_post_p); gimplify_seq_add_seq (orig_post_p, post); *expr_p = lhs; return GS_ALL_DONE; } else { *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1); return GS_OK; } } /* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR. */ static void maybe_with_size_expr (tree *expr_p) { tree expr = *expr_p; tree type = TREE_TYPE (expr); tree size; /* If we've already wrapped this or the type is error_mark_node, we can't do anything. */ if (TREE_CODE (expr) == WITH_SIZE_EXPR || type == error_mark_node) return; /* If the size isn't known or is a constant, we have nothing to do. */ size = TYPE_SIZE_UNIT (type); if (!size || TREE_CODE (size) == INTEGER_CST) return; /* Otherwise, make a WITH_SIZE_EXPR. */ size = unshare_expr (size); size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr); *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size); } /* Helper for gimplify_call_expr. Gimplify a single argument *ARG_P Store any side-effects in PRE_P. CALL_LOCATION is the location of the CALL_EXPR. */ static enum gimplify_status gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location) { bool (*test) (tree); fallback_t fb; /* In general, we allow lvalues for function arguments to avoid extra overhead of copying large aggregates out of even larger aggregates into temporaries only to copy the temporaries to the argument list. Make optimizers happy by pulling out to temporaries those types that fit in registers. */ if (is_gimple_reg_type (TREE_TYPE (*arg_p))) test = is_gimple_val, fb = fb_rvalue; else { test = is_gimple_lvalue, fb = fb_either; /* Also strip a TARGET_EXPR that would force an extra copy. */ if (TREE_CODE (*arg_p) == TARGET_EXPR) { tree init = TARGET_EXPR_INITIAL (*arg_p); if (init && !VOID_TYPE_P (TREE_TYPE (init))) *arg_p = init; } } /* If this is a variable sized type, we must remember the size. */ maybe_with_size_expr (arg_p); /* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c. */ /* Make sure arguments have the same location as the function call itself. */ protected_set_expr_location (*arg_p, call_location); /* There is a sequence point before a function call. Side effects in the argument list must occur before the actual call. So, when gimplifying arguments, force gimplify_expr to use an internal post queue which is then appended to the end of PRE_P. */ return gimplify_expr (arg_p, pre_p, NULL, test, fb); } /* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P. WANT_VALUE is true if the result of the call is desired. */ static enum gimplify_status gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value) { tree fndecl, parms, p, fnptrtype; enum gimplify_status ret; int i, nargs; gimple call; bool builtin_va_start_p = FALSE; location_t loc = EXPR_LOCATION (*expr_p); gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR); /* For reliable diagnostics during inlining, it is necessary that every call_expr be annotated with file and line. */ if (! EXPR_HAS_LOCATION (*expr_p)) SET_EXPR_LOCATION (*expr_p, input_location); /* This may be a call to a builtin function. Builtin function calls may be transformed into different (and more efficient) builtin function calls under certain circumstances. 
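For instance, fold_call_expr can evaluate 'strlen ("abc")' down to the constant 3, or rewrite a printf of a plain string as a cheaper call to puts.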
Unfortunately, gimplification can muck things up enough that the builtin expanders are not aware that certain transformations are still valid. So we attempt transformation/gimplification of the call before we gimplify the CALL_EXPR. At this time we do not manage to transform all calls in the same manner as the expanders do, but we do transform most of them. */ fndecl = get_callee_fndecl (*expr_p); if (fndecl && DECL_BUILT_IN (fndecl)) { tree new_tree = fold_call_expr (input_location, *expr_p, !want_value); if (new_tree && new_tree != *expr_p) { /* There was a transformation of this call which computes the same value, but in a more efficient way. Return and try again. */ *expr_p = new_tree; return GS_OK; } if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_VA_START) { builtin_va_start_p = TRUE; if (call_expr_nargs (*expr_p) < 2) { error ("too few arguments to function %<va_start%>"); *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p)); return GS_OK; } if (fold_builtin_next_arg (*expr_p, true)) { *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p)); return GS_OK; } } } /* Remember the original function pointer type. */ fnptrtype = TREE_TYPE (CALL_EXPR_FN (*expr_p)); /* There is a sequence point before the call, so any side effects in the calling expression must occur before the actual call. Force gimplify_expr to use an internal post queue. */ ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL, is_gimple_call_addr, fb_rvalue); nargs = call_expr_nargs (*expr_p); /* Get argument types for verification. */ fndecl = get_callee_fndecl (*expr_p); parms = NULL_TREE; if (fndecl) parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); else if (POINTER_TYPE_P (TREE_TYPE (CALL_EXPR_FN (*expr_p)))) parms = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (*expr_p)))); if (fndecl && DECL_ARGUMENTS (fndecl)) p = DECL_ARGUMENTS (fndecl); else if (parms) p = parms; else p = NULL_TREE; for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p)) ; /* If the last argument is __builtin_va_arg_pack () and it is not passed as a named argument, decrease the number of CALL_EXPR arguments and set instead the CALL_EXPR_VA_ARG_PACK flag. */ if (!p && i < nargs && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR) { tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1); tree last_arg_fndecl = get_callee_fndecl (last_arg); if (last_arg_fndecl && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK) { tree call = *expr_p; --nargs; *expr_p = build_call_array_loc (loc, TREE_TYPE (call), CALL_EXPR_FN (call), nargs, CALL_EXPR_ARGP (call)); /* Copy all CALL_EXPR flags, location and block, except CALL_EXPR_VA_ARG_PACK flag. */ CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call); CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call); CALL_EXPR_RETURN_SLOT_OPT (*expr_p) = CALL_EXPR_RETURN_SLOT_OPT (call); CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call); SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call)); TREE_BLOCK (*expr_p) = TREE_BLOCK (call); /* Set CALL_EXPR_VA_ARG_PACK. */ CALL_EXPR_VA_ARG_PACK (*expr_p) = 1; } } /* Finally, gimplify the function arguments. */ if (nargs > 0) { for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0); PUSH_ARGS_REVERSED ? i >= 0 : i < nargs; PUSH_ARGS_REVERSED ? i-- : i++) { enum gimplify_status t; /* Avoid gimplifying the second argument to va_start, which needs to be the plain PARM_DECL. 
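That is, in 'va_start (ap, last)' the second operand must stay the last named PARM_DECL itself; wrapping it in a temporary would break the expanders, which locate the anonymous arguments relative to that specific parameter.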
*/ if ((i != 1) || !builtin_va_start_p) { t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p, EXPR_LOCATION (*expr_p)); if (t == GS_ERROR) ret = GS_ERROR; } } } /* Verify the function result. */ if (want_value && fndecl && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fnptrtype)))) { error_at (loc, "using result of function returning %<void%>"); ret = GS_ERROR; } /* Try this again in case gimplification exposed something. */ if (ret != GS_ERROR) { tree new_tree = fold_call_expr (input_location, *expr_p, !want_value); if (new_tree && new_tree != *expr_p) { /* There was a transformation of this call which computes the same value, but in a more efficient way. Return and try again. */ *expr_p = new_tree; return GS_OK; } } else { *expr_p = error_mark_node; return GS_ERROR; } /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its decl. This allows us to eliminate redundant or useless calls to "const" functions. */ if (TREE_CODE (*expr_p) == CALL_EXPR) { int flags = call_expr_flags (*expr_p); if (flags & (ECF_CONST | ECF_PURE) /* An infinite loop is considered a side effect. */ && !(flags & (ECF_LOOPING_CONST_OR_PURE))) TREE_SIDE_EFFECTS (*expr_p) = 0; } /* If the value is not needed by the caller, emit a new GIMPLE_CALL and clear *EXPR_P. Otherwise, leave *EXPR_P in its gimplified form and delegate the creation of a GIMPLE_CALL to gimplify_modify_expr. This is always possible because when WANT_VALUE is true, the caller wants the result of this call into a temporary, which means that we will emit an INIT_EXPR in internal_get_tmp_var which will then be handled by gimplify_modify_expr. */ if (!want_value) { /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we have to do is replicate it as a GIMPLE_CALL tuple. */ gimple_stmt_iterator gsi; call = gimple_build_call_from_tree (*expr_p); gimple_call_set_fntype (call, TREE_TYPE (fnptrtype)); gimplify_seq_add_stmt (pre_p, call); gsi = gsi_last (*pre_p); fold_stmt (&gsi); *expr_p = NULL_TREE; } else /* Remember the original function type. */ CALL_EXPR_FN (*expr_p) = build1 (NOP_EXPR, fnptrtype, CALL_EXPR_FN (*expr_p)); return ret; } /* Handle shortcut semantics in the predicate operand of a COND_EXPR by rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs. TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the condition is true or false, respectively. If null, we should generate our own to skip over the evaluation of this specific expression. LOCUS is the source location of the COND_EXPR. This function is the tree equivalent of do_jump. shortcut_cond_r should only be called by shortcut_cond_expr. */ static tree shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p, location_t locus) { tree local_label = NULL_TREE; tree t, expr = NULL; /* OK, it's not a simple case; we need to pull apart the COND_EXPR to retain the shortcut semantics. Just insert the gotos here; shortcut_cond_expr will append the real blocks later. */ if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { location_t new_locus; /* Turn if (a && b) into if (a); else goto no; if (b) goto yes; else goto no; (no:) */ if (false_label_p == NULL) false_label_p = &local_label; /* Keep the original source location on the first 'if'. */ t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus); append_to_statement_list (t, &expr); /* Set the source location of the && on the second 'if'. */ new_locus = EXPR_HAS_LOCATION (pred) ? 
EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                           false_label_p, new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a || b) into

         if (a) goto yes;
         if (b) goto yes; else goto no;
         (yes:)  */

      if (true_label_p == NULL)
        true_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the || on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                           false_label_p, new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR
           && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
           && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      location_t new_locus;

      /* As long as we're messing with gotos, turn if (a ? b : c) into
         if (a)
           if (b) goto yes; else goto no;
         else
           if (c) goto yes; else goto no;

         Don't do this if one of the arms has void type, which can happen
         in C++ when the arm is throw.  */

      /* Keep the original source location on the first 'if'.  Set the source
         location of the ? on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
                     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                                      false_label_p, locus),
                     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
                                      false_label_p, new_locus));
    }
  else
    {
      expr = build3 (COND_EXPR, void_type_node, pred,
                     build_and_jump (true_label_p),
                     build_and_jump (false_label_p));
      SET_EXPR_LOCATION (expr, locus);
    }

  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* Given a conditional expression EXPR with short-circuit boolean
   predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
   predicate apart into the equivalent sequence of conditionals.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn
           if (a && b) then c
         into
           if (a) if (b) then c.  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
        {
          /* Keep the original source location on the first 'if'.  */
          location_t locus = EXPR_LOC_OR_HERE (expr);
          TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
          /* Set the source location of the && on the second 'if'.  */
          if (EXPR_HAS_LOCATION (pred))
            SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
          then_ = shortcut_cond_expr (expr);
          then_se = then_ && TREE_SIDE_EFFECTS (then_);
          pred = TREE_OPERAND (pred, 0);
          expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
          SET_EXPR_LOCATION (expr, locus);
        }
    }

  if (!then_se)
    {
      /* If there is no 'then', turn
           if (a || b); else d
         into
           if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
        {
          /* Keep the original source location on the first 'if'.  */
          location_t locus = EXPR_LOC_OR_HERE (expr);
          TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
          /* Set the source location of the || on the second 'if'.
*/ if (EXPR_HAS_LOCATION (pred)) SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred)); else_ = shortcut_cond_expr (expr); else_se = else_ && TREE_SIDE_EFFECTS (else_); pred = TREE_OPERAND (pred, 0); expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_); SET_EXPR_LOCATION (expr, locus); } } /* If we're done, great. */ if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR && TREE_CODE (pred) != TRUTH_ORIF_EXPR) return expr; /* Otherwise we need to mess with gotos. Change if (a) c; else d; to if (a); else goto no; c; goto end; no: d; end: and recursively gimplify the condition. */ true_label = false_label = end_label = NULL_TREE; /* If our arms just jump somewhere, hijack those labels so we don't generate jumps to jumps. */ if (then_ && TREE_CODE (then_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL) { true_label = GOTO_DESTINATION (then_); then_ = NULL; then_se = false; } if (else_ && TREE_CODE (else_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL) { false_label = GOTO_DESTINATION (else_); else_ = NULL; else_se = false; } /* If we aren't hijacking a label for the 'then' branch, it falls through. */ if (true_label) true_label_p = &true_label; else true_label_p = NULL; /* The 'else' branch also needs a label if it contains interesting code. */ if (false_label || else_se) false_label_p = &false_label; else false_label_p = NULL; /* If there was nothing else in our arms, just forward the label(s). */ if (!then_se && !else_se) return shortcut_cond_r (pred, true_label_p, false_label_p, EXPR_LOC_OR_HERE (expr)); /* If our last subexpression already has a terminal label, reuse it. */ if (else_se) t = expr_last (else_); else if (then_se) t = expr_last (then_); else t = NULL; if (t && TREE_CODE (t) == LABEL_EXPR) end_label = LABEL_EXPR_LABEL (t); /* If we don't care about jumping to the 'else' branch, jump to the end if the condition is false. */ if (!false_label_p) false_label_p = &end_label; /* We only want to emit these labels if we aren't hijacking them. */ emit_end = (end_label == NULL_TREE); emit_false = (false_label == NULL_TREE); /* We only emit the jump over the else clause if we have to--if the then clause may fall through. Otherwise we can wind up with a useless jump and a useless label at the end of gimplified code, which will cause us to think that this conditional as a whole falls through even if it doesn't. If we then inline a function which ends with such a condition, that can cause us to issue an inappropriate warning about control reaching the end of a non-void function. */ jump_over_else = block_may_fallthru (then_); pred = shortcut_cond_r (pred, true_label_p, false_label_p, EXPR_LOC_OR_HERE (expr)); expr = NULL; append_to_statement_list (pred, &expr); append_to_statement_list (then_, &expr); if (else_se) { if (jump_over_else) { tree last = expr_last (expr); t = build_and_jump (&end_label); if (EXPR_HAS_LOCATION (last)) SET_EXPR_LOCATION (t, EXPR_LOCATION (last)); append_to_statement_list (t, &expr); } if (emit_false) { t = build1 (LABEL_EXPR, void_type_node, false_label); append_to_statement_list (t, &expr); } append_to_statement_list (else_, &expr); } if (emit_end && end_label) { t = build1 (LABEL_EXPR, void_type_node, end_label); append_to_statement_list (t, &expr); } return expr; } /* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE. 
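Comparisons and TRUTH_* nodes are simply re-typed in place; anything else is converted, so an 'int' condition C, say, ends up as the equivalent of C != 0 (a sketch of the intent; the code below spells out the exact cases).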
*/

tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);

  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);

      /* For __builtin_expect ((long) (x), y) recurse into x as well
         if x is truth_value_p.  */
      if (fn
          && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
          && DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
          && call_expr_nargs (call) == 2)
        {
          tree arg = CALL_EXPR_ARG (call, 0);
          if (arg)
            {
              if (TREE_CODE (arg) == NOP_EXPR
                  && TREE_TYPE (arg) == TREE_TYPE (call))
                arg = TREE_OPERAND (arg, 0);
              if (truth_value_p (TREE_CODE (arg)))
                {
                  arg = gimple_boolify (arg);
                  CALL_EXPR_ARG (call, 0)
                    = fold_convert_loc (loc, TREE_TYPE (call), arg);
                }
            }
        }
    }

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */

    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

      /* These expressions always produce boolean results.  */
      if (TREE_CODE (type) != BOOLEAN_TYPE)
        TREE_TYPE (expr) = boolean_type_node;
      return expr;

    default:
      if (COMPARISON_CLASS_P (expr))
        {
          /* These expressions always produce boolean results.  */
          if (TREE_CODE (type) != BOOLEAN_TYPE)
            TREE_TYPE (expr) = boolean_type_node;
          return expr;
        }
      /* Other expressions that get here must have boolean values, but
         might need to be converted to the appropriate mode.  */
      if (TREE_CODE (type) == BOOLEAN_TYPE)
        return expr;
      return fold_convert_loc (loc, boolean_type_node, expr);
    }
}

/* Given a conditional expression *EXPR_P without side effects, gimplify
   its operands.  New statements are inserted to PRE_P.  */

static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p, cond;
  enum gimplify_status ret, tret;
  enum tree_code code;

  cond = gimple_boolify (COND_EXPR_COND (expr));

  /* We need to handle && and || specially, as their gimplification
     creates pure cond_expr, thus leading to an infinite cycle otherwise.  */
  code = TREE_CODE (cond);
  if (code == TRUTH_ANDIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_AND_EXPR);
  else if (code == TRUTH_ORIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_OR_EXPR);
  ret = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = cond;

  tret = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL,
                        is_gimple_val, fb_rvalue);
  ret = MIN (ret, tret);
  tret = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL,
                        is_gimple_val, fb_rvalue);

  return MIN (ret, tret);
}

/* Return true if evaluating EXPR could trap.
   EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE.  */

static bool
generic_expr_could_trap_p (tree expr)
{
  unsigned i, n;

  if (!expr || is_gimple_val (expr))
    return false;

  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, i)))
      return true;

  return false;
}

/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
   into

   if (p)                       if (p)
     t1 = a;                      a;
   else                  or else
     t1 = b;                      b;
   t1;

   The second form is used when *EXPR_P is of type void.

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.
*/ static enum gimplify_status gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback) { tree expr = *expr_p; tree type = TREE_TYPE (expr); location_t loc = EXPR_LOCATION (expr); tree tmp, arm1, arm2; enum gimplify_status ret; tree label_true, label_false, label_cont; bool have_then_clause_p, have_else_clause_p; gimple gimple_cond; enum tree_code pred_code; gimple_seq seq = NULL; /* If this COND_EXPR has a value, copy the values into a temporary within the arms. */ if (!VOID_TYPE_P (type)) { tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2); tree result; /* If either an rvalue is ok or we do not require an lvalue, create the temporary. But we cannot do that if the type is addressable. */ if (((fallback & fb_rvalue) || !(fallback & fb_lvalue)) && !TREE_ADDRESSABLE (type)) { if (gimplify_ctxp->allow_rhs_cond_expr /* If either branch has side effects or could trap, it can't be evaluated unconditionally. */ && !TREE_SIDE_EFFECTS (then_) && !generic_expr_could_trap_p (then_) && !TREE_SIDE_EFFECTS (else_) && !generic_expr_could_trap_p (else_)) return gimplify_pure_cond_expr (expr_p, pre_p); tmp = create_tmp_var (type, "iftmp"); result = tmp; } /* Otherwise, only create and copy references to the values. */ else { type = build_pointer_type (type); if (!VOID_TYPE_P (TREE_TYPE (then_))) then_ = build_fold_addr_expr_loc (loc, then_); if (!VOID_TYPE_P (TREE_TYPE (else_))) else_ = build_fold_addr_expr_loc (loc, else_); expr = build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_); tmp = create_tmp_var (type, "iftmp"); result = build_simple_mem_ref_loc (loc, tmp); } /* Build the new then clause, `tmp = then_;'. But don't build the assignment if the value is void; in C++ it can be if it's a throw. */ if (!VOID_TYPE_P (TREE_TYPE (then_))) TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, type, tmp, then_); /* Similarly, build the new else clause, `tmp = else_;'. */ if (!VOID_TYPE_P (TREE_TYPE (else_))) TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, type, tmp, else_); TREE_TYPE (expr) = void_type_node; recalculate_side_effects (expr); /* Move the COND_EXPR to the prequeue. */ gimplify_stmt (&expr, pre_p); *expr_p = result; return GS_ALL_DONE; } /* Remove any COMPOUND_EXPR so the following cases will be caught. */ STRIP_TYPE_NOPS (TREE_OPERAND (expr, 0)); if (TREE_CODE (TREE_OPERAND (expr, 0)) == COMPOUND_EXPR) gimplify_compound_expr (&TREE_OPERAND (expr, 0), pre_p, true); /* Make sure the condition has BOOLEAN_TYPE. */ TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); /* Break apart && and || conditions. */ if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR) { expr = shortcut_cond_expr (expr); if (expr != *expr_p) { *expr_p = expr; /* We can't rely on gimplify_expr to re-gimplify the expanded form properly, as cleanups might cause the target labels to be wrapped in a TRY_FINALLY_EXPR. To prevent that, we need to set up a conditional context. */ gimple_push_condition (); gimplify_stmt (expr_p, &seq); gimple_pop_condition (pre_p); gimple_seq_add_seq (pre_p, seq); return GS_ALL_DONE; } } /* Now do the normal gimplification. */ /* Gimplify condition. 
*/ ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr, fb_rvalue); if (ret == GS_ERROR) return GS_ERROR; gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE); gimple_push_condition (); have_then_clause_p = have_else_clause_p = false; if (TREE_OPERAND (expr, 1) != NULL && TREE_CODE (TREE_OPERAND (expr, 1)) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == LABEL_DECL && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == current_function_decl) /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR have different locations, otherwise we end up with incorrect location information on the branches. */ && (optimize || !EXPR_HAS_LOCATION (expr) || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 1)) || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 1)))) { label_true = GOTO_DESTINATION (TREE_OPERAND (expr, 1)); have_then_clause_p = true; } else label_true = create_artificial_label (UNKNOWN_LOCATION); if (TREE_OPERAND (expr, 2) != NULL && TREE_CODE (TREE_OPERAND (expr, 2)) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == LABEL_DECL && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == current_function_decl) /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR have different locations, otherwise we end up with incorrect location information on the branches. */ && (optimize || !EXPR_HAS_LOCATION (expr) || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 2)) || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 2)))) { label_false = GOTO_DESTINATION (TREE_OPERAND (expr, 2)); have_else_clause_p = true; } else label_false = create_artificial_label (UNKNOWN_LOCATION); gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1, &arm2); gimple_cond = gimple_build_cond (pred_code, arm1, arm2, label_true, label_false); gimplify_seq_add_stmt (&seq, gimple_cond); label_cont = NULL_TREE; if (!have_then_clause_p) { /* For if (...) {} else { code; } put label_true after the else block. */ if (TREE_OPERAND (expr, 1) == NULL_TREE && !have_else_clause_p && TREE_OPERAND (expr, 2) != NULL_TREE) label_cont = label_true; else { gimplify_seq_add_stmt (&seq, gimple_build_label (label_true)); have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq); /* For if (...) { code; } else {} or if (...) { code; } else goto label; or if (...) { code; return; } else { ... } label_cont isn't needed. */ if (!have_else_clause_p && TREE_OPERAND (expr, 2) != NULL_TREE && gimple_seq_may_fallthru (seq)) { gimple g; label_cont = create_artificial_label (UNKNOWN_LOCATION); g = gimple_build_goto (label_cont); /* GIMPLE_COND's are very low level; they have embedded gotos. This particular embedded goto should not be marked with the location of the original COND_EXPR, as it would correspond to the COND_EXPR's condition, not the ELSE or the THEN arms. To avoid marking it with the wrong location, flag it as "no location". */ gimple_set_do_not_emit_location (g); gimplify_seq_add_stmt (&seq, g); } } } if (!have_else_clause_p) { gimplify_seq_add_stmt (&seq, gimple_build_label (label_false)); have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq); } if (label_cont) gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont)); gimple_pop_condition (pre_p); gimple_seq_add_seq (pre_p, seq); if (ret == GS_ERROR) ; /* Do nothing. */ else if (have_then_clause_p || have_else_clause_p) ret = GS_ALL_DONE; else { /* Both arms are empty; replace the COND_EXPR with its predicate. 
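E.g. 'if (f ()) ; else ;' degenerates to evaluating f () for its side effects alone.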
*/ expr = TREE_OPERAND (expr, 0); gimplify_stmt (&expr, pre_p); } *expr_p = NULL; return ret; } /* Prepare the node pointed to by EXPR_P, an is_gimple_addressable expression, to be marked addressable. We cannot rely on such an expression being directly markable if a temporary has been created by the gimplification. In this case, we create another temporary and initialize it with a copy, which will become a store after we mark it addressable. This can happen if the front-end passed us something that it could not mark addressable yet, like a Fortran pass-by-reference parameter (int) floatvar. */ static void prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p) { while (handled_component_p (*expr_p)) expr_p = &TREE_OPERAND (*expr_p, 0); if (is_gimple_reg (*expr_p)) *expr_p = get_initialized_tmp_var (*expr_p, seq_p, NULL); } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memcpy. */ static enum gimplify_status gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value, gimple_seq *seq_p) { tree t, to, to_ptr, from, from_ptr; gimple gs; location_t loc = EXPR_LOCATION (*expr_p); to = TREE_OPERAND (*expr_p, 0); from = TREE_OPERAND (*expr_p, 1); /* Mark the RHS addressable. Beware that it may not be possible to do so directly if a temporary has been created by the gimplification. */ prepare_gimple_addressable (&from, seq_p); mark_addressable (from); from_ptr = build_fold_addr_expr_loc (loc, from); gimplify_arg (&from_ptr, seq_p, loc); mark_addressable (to); to_ptr = build_fold_addr_expr_loc (loc, to); gimplify_arg (&to_ptr, seq_p, loc); t = builtin_decl_implicit (BUILT_IN_MEMCPY); gs = gimple_build_call (t, 3, to_ptr, from_ptr, size); if (want_value) { /* tmp = memcpy() */ t = create_tmp_var (TREE_TYPE (to_ptr), NULL); gimple_call_set_lhs (gs, t); gimplify_seq_add_stmt (seq_p, gs); *expr_p = build_simple_mem_ref (t); return GS_ALL_DONE; } gimplify_seq_add_stmt (seq_p, gs); *expr_p = NULL; return GS_ALL_DONE; } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memset. In this case we know that the RHS is a CONSTRUCTOR with an empty element list. */ static enum gimplify_status gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value, gimple_seq *seq_p) { tree t, from, to, to_ptr; gimple gs; location_t loc = EXPR_LOCATION (*expr_p); /* Assert our assumptions, to abort instead of producing wrong code silently if they are not met. Beware that the RHS CONSTRUCTOR might not be immediately exposed. */ from = TREE_OPERAND (*expr_p, 1); if (TREE_CODE (from) == WITH_SIZE_EXPR) from = TREE_OPERAND (from, 0); gcc_assert (TREE_CODE (from) == CONSTRUCTOR && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (from))); /* Now proceed. */ to = TREE_OPERAND (*expr_p, 0); to_ptr = build_fold_addr_expr_loc (loc, to); gimplify_arg (&to_ptr, seq_p, loc); t = builtin_decl_implicit (BUILT_IN_MEMSET); gs = gimple_build_call (t, 3, to_ptr, integer_zero_node, size); if (want_value) { /* tmp = memset() */ t = create_tmp_var (TREE_TYPE (to_ptr), NULL); gimple_call_set_lhs (gs, t); gimplify_seq_add_stmt (seq_p, gs); *expr_p = build1 (INDIRECT_REF, TREE_TYPE (to), t); return GS_ALL_DONE; } gimplify_seq_add_stmt (seq_p, gs); *expr_p = NULL; return GS_ALL_DONE; } /* A subroutine of gimplify_init_ctor_preeval. Called via walk_tree, determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an assignment. Return non-null if we detect a potential overlap. 
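A typical trigger (illustrative): in 'x = (struct S) { x.b, x.a };' the constructor reads the very object being stored to, so those reads have to be evaluated into temporaries before the stores begin.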
*/ struct gimplify_init_ctor_preeval_data { /* The base decl of the lhs object. May be NULL, in which case we have to assume the lhs is indirect. */ tree lhs_base_decl; /* The alias set of the lhs object. */ alias_set_type lhs_alias_set; }; static tree gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata) { struct gimplify_init_ctor_preeval_data *data = (struct gimplify_init_ctor_preeval_data *) xdata; tree t = *tp; /* If we find the base object, obviously we have overlap. */ if (data->lhs_base_decl == t) return t; /* If the constructor component is indirect, determine if we have a potential overlap with the lhs. The only bits of information we have to go on at this point are addressability and alias sets. */ if ((INDIRECT_REF_P (t) || TREE_CODE (t) == MEM_REF) && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl)) && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t))) return t; /* If the constructor component is a call, determine if it can hide a potential overlap with the lhs through an INDIRECT_REF like above. ??? Ugh - this is completely broken. In fact this whole analysis doesn't look conservative. */ if (TREE_CODE (t) == CALL_EXPR) { tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t))); for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type)) if (POINTER_TYPE_P (TREE_VALUE (type)) && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl)) && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (TREE_TYPE (TREE_VALUE (type))))) return t; } if (IS_TYPE_OR_DECL_P (t)) *walk_subtrees = 0; return NULL; } /* A subroutine of gimplify_init_constructor. Pre-evaluate EXPR, force values that overlap with the lhs (as described by *DATA) into temporaries. */ static void gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, struct gimplify_init_ctor_preeval_data *data) { enum gimplify_status one; /* If the value is constant, then there's nothing to pre-evaluate. */ if (TREE_CONSTANT (*expr_p)) { /* Ensure it does not have side effects, it might contain a reference to the object we're initializing. */ gcc_assert (!TREE_SIDE_EFFECTS (*expr_p)); return; } /* If the type has non-trivial constructors, we can't pre-evaluate. */ if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p))) return; /* Recurse for nested constructors. */ if (TREE_CODE (*expr_p) == CONSTRUCTOR) { unsigned HOST_WIDE_INT ix; constructor_elt *ce; VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p); FOR_EACH_VEC_ELT (constructor_elt, v, ix, ce) gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data); return; } /* If this is a variable sized type, we must remember the size. */ maybe_with_size_expr (expr_p); /* Gimplify the constructor element to something appropriate for the rhs of a MODIFY_EXPR. Given that we know the LHS is an aggregate, we know the gimplifier will consider this a store to memory. Doing this gimplification now means that we won't have to deal with complicated language-specific trees, nor trees like SAVE_EXPR that can induce exponential search behavior. */ one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue); if (one == GS_ERROR) { *expr_p = NULL; return; } /* If we gimplified to a bare decl, we can be sure that it doesn't overlap with the lhs, since "a = { .x=a }" doesn't make sense. This will always be true for all scalars, since is_gimple_mem_rhs insists on a temporary variable for them. 
*/ if (DECL_P (*expr_p)) return; /* If this is of variable size, we have no choice but to assume it doesn't overlap since we can't make a temporary for it. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST) return; /* Otherwise, we must search for overlap ... */ if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL)) return; /* ... and if found, force the value into a temporary. */ *expr_p = get_formal_tmp_var (*expr_p, pre_p); } /* A subroutine of gimplify_init_ctor_eval. Create a loop for a RANGE_EXPR in a CONSTRUCTOR for an array. var = lower; loop_entry: object[var] = value; if (var == upper) goto loop_exit; var = var + 1; goto loop_entry; loop_exit: We increment var _after_ the loop exit check because we might otherwise fail if upper == TYPE_MAX_VALUE (type for upper). Note that we never have to deal with SAVE_EXPRs here, because this has already been taken care of for us, in gimplify_init_ctor_preeval(). */ static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *, gimple_seq *, bool); static void gimplify_init_ctor_eval_range (tree object, tree lower, tree upper, tree value, tree array_elt_type, gimple_seq *pre_p, bool cleared) { tree loop_entry_label, loop_exit_label, fall_thru_label; tree var, var_type, cref, tmp; loop_entry_label = create_artificial_label (UNKNOWN_LOCATION); loop_exit_label = create_artificial_label (UNKNOWN_LOCATION); fall_thru_label = create_artificial_label (UNKNOWN_LOCATION); /* Create and initialize the index variable. */ var_type = TREE_TYPE (upper); var = create_tmp_var (var_type, NULL); gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower)); /* Add the loop entry label. */ gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label)); /* Build the reference. */ cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object), var, NULL_TREE, NULL_TREE); /* If we are a constructor, just call gimplify_init_ctor_eval to do the store. Otherwise just assign value to the reference. */ if (TREE_CODE (value) == CONSTRUCTOR) /* NB we might have to call ourself recursively through gimplify_init_ctor_eval if the value is a constructor. */ gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value), pre_p, cleared); else gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value)); /* We exit the loop when the index var is equal to the upper bound. */ gimplify_seq_add_stmt (pre_p, gimple_build_cond (EQ_EXPR, var, upper, loop_exit_label, fall_thru_label)); gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label)); /* Otherwise, increment the index var... */ tmp = build2 (PLUS_EXPR, var_type, var, fold_convert (var_type, integer_one_node)); gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp)); /* ...and jump back to the loop entry. */ gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label)); /* Add the loop exit label. */ gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label)); } /* Return true if FDECL is accessing a field that is zero sized. */ static bool zero_sized_field_decl (const_tree fdecl) { if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl) && integer_zerop (DECL_SIZE (fdecl))) return true; return false; } /* Return true if TYPE is zero sized. */ static bool zero_sized_type (const_tree type) { if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) && integer_zerop (TYPE_SIZE (type))) return true; return false; } /* A subroutine of gimplify_init_constructor. Generate individual MODIFY_EXPRs for a CONSTRUCTOR. 
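For instance, 'struct S s = { .i = 1, .j = 2 };' decomposes into the stores 's.i = 1' and 's.j = 2', while a RANGE_EXPR index expands into the loop built by gimplify_init_ctor_eval_range above (an illustrative sketch, not an additional transformation).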
OBJECT is the LHS against which the assignments should happen. ELTS is the CONSTRUCTOR_ELTS of the CONSTRUCTOR. CLEARED is true if the entire LHS object has been zeroed first. */ static void gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts, gimple_seq *pre_p, bool cleared) { tree array_elt_type = NULL; unsigned HOST_WIDE_INT ix; tree purpose, value; if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE) array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object))); FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value) { tree cref; /* NULL values are created above for gimplification errors. */ if (value == NULL) continue; if (cleared && initializer_zerop (value)) continue; /* ??? Here's to hoping the front end fills in all of the indices, so we don't have to figure out what's missing ourselves. */ gcc_assert (purpose); /* Skip zero-sized fields, unless value has side-effects. This can happen with calls to functions returning a zero-sized type, which we shouldn't discard. As a number of downstream passes don't expect sets of zero-sized fields, we rely on the gimplification of the MODIFY_EXPR we make below to drop the assignment statement. */ if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose)) continue; /* If we have a RANGE_EXPR, we have to build a loop to assign the whole range. */ if (TREE_CODE (purpose) == RANGE_EXPR) { tree lower = TREE_OPERAND (purpose, 0); tree upper = TREE_OPERAND (purpose, 1); /* If the lower bound is equal to upper, just treat it as if upper was the index. */ if (simple_cst_equal (lower, upper)) purpose = upper; else { gimplify_init_ctor_eval_range (object, lower, upper, value, array_elt_type, pre_p, cleared); continue; } } if (array_elt_type) { /* Do not use bitsizetype for ARRAY_REF indices. */ if (TYPE_DOMAIN (TREE_TYPE (object))) purpose = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))), purpose); cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object), purpose, NULL_TREE, NULL_TREE); } else { gcc_assert (TREE_CODE (purpose) == FIELD_DECL); cref = build3 (COMPONENT_REF, TREE_TYPE (purpose), unshare_expr (object), purpose, NULL_TREE); } if (TREE_CODE (value) == CONSTRUCTOR && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE) gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value), pre_p, cleared); else { tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value); gimplify_and_add (init, pre_p); ggc_free (init); } } } /* Return the appropriate RHS predicate for this LHS. */ gimple_predicate rhs_predicate_for (tree lhs) { if (is_gimple_reg (lhs)) return is_gimple_reg_rhs_or_call; else return is_gimple_mem_rhs_or_call; } /* Gimplify a C99 compound literal expression. This just means adding the DECL_EXPR before the current statement and using its anonymous decl instead. */ static enum gimplify_status gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p) { tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p); tree decl = DECL_EXPR_DECL (decl_s); /* Mark the decl as addressable if the compound literal expression is addressable now, otherwise it is marked too late after we gimplify the initialization expression. */ if (TREE_ADDRESSABLE (*expr_p)) TREE_ADDRESSABLE (decl) = 1; /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. We'll transform their uses as we find them. 
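(For example, a compound literal of type _Complex double whose address is never taken can later be split by the complex lowering pass into separate real and imaginary registers.)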
*/ if ((TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE) && !TREE_THIS_VOLATILE (decl) && !needs_to_live_in_memory (decl)) DECL_GIMPLE_REG_P (decl) = 1; /* This decl isn't mentioned in the enclosing block, so add it to the list of temps. FIXME it seems a bit of a kludge to say that anonymous artificial vars aren't pushed, but everything else is. */ if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl)) gimple_add_tmp_var (decl); gimplify_and_add (decl_s, pre_p); *expr_p = decl; return GS_OK; } /* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR, return a new CONSTRUCTOR if something changed. */ static tree optimize_compound_literals_in_ctor (tree orig_ctor) { tree ctor = orig_ctor; VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (ctor); unsigned int idx, num = VEC_length (constructor_elt, elts); for (idx = 0; idx < num; idx++) { tree value = VEC_index (constructor_elt, elts, idx)->value; tree newval = value; if (TREE_CODE (value) == CONSTRUCTOR) newval = optimize_compound_literals_in_ctor (value); else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR) { tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value); tree decl = DECL_EXPR_DECL (decl_s); tree init = DECL_INITIAL (decl); if (!TREE_ADDRESSABLE (value) && !TREE_ADDRESSABLE (decl) && init) newval = optimize_compound_literals_in_ctor (init); } if (newval == value) continue; if (ctor == orig_ctor) { ctor = copy_node (orig_ctor); CONSTRUCTOR_ELTS (ctor) = VEC_copy (constructor_elt, gc, elts); elts = CONSTRUCTOR_ELTS (ctor); } VEC_index (constructor_elt, elts, idx)->value = newval; } return ctor; } /* A subroutine of gimplify_modify_expr. Break out elements of a CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs. Note that we still need to clear any elements that don't have explicit initializers, so if not all elements are initialized we keep the original MODIFY_EXPR, we just remove all of the constructor elements. If NOTIFY_TEMP_CREATION is true, do not gimplify, just return GS_ERROR if we would have to create a temporary when gimplifying this constructor. Otherwise, return GS_OK. If NOTIFY_TEMP_CREATION is false, just do the gimplification. */ static enum gimplify_status gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, bool want_value, bool notify_temp_creation) { tree object, ctor, type; enum gimplify_status ret; VEC(constructor_elt,gc) *elts; gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR); if (!notify_temp_creation) { ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret == GS_ERROR) return ret; } object = TREE_OPERAND (*expr_p, 0); ctor = TREE_OPERAND (*expr_p, 1) = optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1)); type = TREE_TYPE (ctor); elts = CONSTRUCTOR_ELTS (ctor); ret = GS_ALL_DONE; switch (TREE_CODE (type)) { case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: case ARRAY_TYPE: { struct gimplify_init_ctor_preeval_data preeval_data; HOST_WIDE_INT num_ctor_elements, num_nonzero_elements; bool cleared, complete_p, valid_const_initializer; /* Aggregate types must lower constructors to initialization of individual elements. The exception is that a CONSTRUCTOR node with no elements indicates zero-initialization of the whole. */ if (VEC_empty (constructor_elt, elts)) { if (notify_temp_creation) return GS_OK; break; } /* Fetch information about the constructor to direct later processing. 
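For '{ 0, 0, 0, 5 }', say, we would learn that there are four elements, exactly one of them nonzero, and that the whole thing is a valid constant initializer; those are the facts that drive the choices below.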
We might want to make static versions of it in various cases, and can
           only do so if it is known to be a valid constant initializer.  */
        valid_const_initializer
          = categorize_ctor_elements (ctor, &num_nonzero_elements,
                                      &num_ctor_elements, &complete_p);

        /* If a const aggregate variable is being initialized, then it
           should never be a loss to promote the variable to be static.  */
        if (valid_const_initializer
            && num_nonzero_elements > 1
            && TREE_READONLY (object)
            && TREE_CODE (object) == VAR_DECL
            && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
          {
            if (notify_temp_creation)
              return GS_ERROR;
            DECL_INITIAL (object) = ctor;
            TREE_STATIC (object) = 1;
            if (!DECL_NAME (object))
              DECL_NAME (object) = create_tmp_var_name ("C");
            walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

            /* ??? C++ doesn't automatically append a .<number> to the
               assembler name, and even when it does, it looks at FE private
               data structures to figure out what that number should be,
               which are not set for this variable.  I suppose this is
               important for local statics for inline functions, which aren't
               "local" in the object file sense.  So in order to get a unique
               TU-local symbol, we must invoke the lhd version now.  */
            lhd_set_decl_assembler_name (object);

            *expr_p = NULL_TREE;
            break;
          }

        /* If there are "lots" of initialized elements, even discounting
           those that are not address constants (and thus *must* be
           computed at runtime), then partition the constructor into
           constant and non-constant parts.  Block copy the constant
           parts in, then generate code for the non-constant parts.  */
        /* TODO.  There's code in cp/typeck.c to do this.  */

        if (int_size_in_bytes (TREE_TYPE (ctor)) < 0)
          /* store_constructor will ignore the clearing of variable-sized
             objects.  Initializers for such objects must explicitly set
             every field that needs to be set.  */
          cleared = false;
        else if (!complete_p)
          /* If the constructor isn't complete, clear the whole object
             beforehand.

             ??? This ought not to be needed.  For any elements not present
             in the initializer, we should simply set them to zero.  Except
             we'd need to *find* the elements that are not present, and that
             requires trickery to avoid quadratic compile-time behavior in
             large cases or excessive memory use in small cases.  */
          cleared = true;
        else if (num_ctor_elements - num_nonzero_elements
                 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
                 && num_nonzero_elements < num_ctor_elements / 4)
          /* If there are "lots" of zeros, it's more efficient to clear
             the memory and then set the nonzero elements.  */
          cleared = true;
        else
          cleared = false;

        /* If there are "lots" of initialized elements, and all of them
           are valid address constants, then the entire initializer can
           be dropped to memory, and then memcpy'd out.  Don't do this
           for sparse arrays, though, as it's more efficient to follow
           the standard CONSTRUCTOR behavior of memset followed by
           individual element initialization.  Also don't do this for small
           all-zero initializers (which aren't big enough to merit
           clearing), and don't try to make bitwise copies of
           TREE_ADDRESSABLE types.  */
        if (valid_const_initializer
            && !(cleared || num_nonzero_elements == 0)
            && !TREE_ADDRESSABLE (type))
          {
            HOST_WIDE_INT size = int_size_in_bytes (type);
            unsigned int align;

            /* ??? We can still get unbounded array types, at least
               from the C++ front end.  This seems wrong, but attempt
               to work around it for now.  */
            if (size < 0)
              {
                size = int_size_in_bytes (TREE_TYPE (object));
                if (size >= 0)
                  TREE_TYPE (ctor) = type = TREE_TYPE (object);
              }

            /* Find the maximum alignment we can assume for the object.
*/ /* ??? Make use of DECL_OFFSET_ALIGN. */ if (DECL_P (object)) align = DECL_ALIGN (object); else align = TYPE_ALIGN (type); if (size > 0 && num_nonzero_elements > 1 && !can_move_by_pieces (size, align)) { if (notify_temp_creation) return GS_ERROR; walk_tree (&ctor, force_labels_r, NULL, NULL); ctor = tree_output_constant_def (ctor); if (!useless_type_conversion_p (type, TREE_TYPE (ctor))) ctor = build1 (VIEW_CONVERT_EXPR, type, ctor); TREE_OPERAND (*expr_p, 1) = ctor; /* This is no longer an assignment of a CONSTRUCTOR, but we still may have processing to do on the LHS. So pretend we didn't do anything here to let that happen. */ return GS_UNHANDLED; } } /* If the target is volatile, we have non-zero elements and more than one field to assign, initialize the target from a temporary. */ if (TREE_THIS_VOLATILE (object) && !TREE_ADDRESSABLE (type) && num_nonzero_elements > 0 && VEC_length (constructor_elt, elts) > 1) { tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type), NULL); TREE_OPERAND (*expr_p, 0) = temp; *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p), *expr_p, build2 (MODIFY_EXPR, void_type_node, object, temp)); return GS_OK; } if (notify_temp_creation) return GS_OK; /* If there are nonzero elements and if needed, pre-evaluate to capture elements overlapping with the lhs into temporaries. We must do this before clearing to fetch the values before they are zeroed-out. */ if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR) { preeval_data.lhs_base_decl = get_base_address (object); if (!DECL_P (preeval_data.lhs_base_decl)) preeval_data.lhs_base_decl = NULL; preeval_data.lhs_alias_set = get_alias_set (object); gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, &preeval_data); } if (cleared) { /* Zap the CONSTRUCTOR element list, which simplifies this case. Note that we still have to gimplify, in order to handle the case of variable sized types. Avoid shared tree structures. */ CONSTRUCTOR_ELTS (ctor) = NULL; TREE_SIDE_EFFECTS (ctor) = 0; object = unshare_expr (object); gimplify_stmt (expr_p, pre_p); } /* If we have not block cleared the object, or if there are nonzero elements in the constructor, add assignments to the individual scalar fields of the object. */ if (!cleared || num_nonzero_elements > 0) gimplify_init_ctor_eval (object, elts, pre_p, cleared); *expr_p = NULL_TREE; } break; case COMPLEX_TYPE: { tree r, i; if (notify_temp_creation) return GS_OK; /* Extract the real and imaginary parts out of the ctor. */ gcc_assert (VEC_length (constructor_elt, elts) == 2); r = VEC_index (constructor_elt, elts, 0)->value; i = VEC_index (constructor_elt, elts, 1)->value; if (r == NULL || i == NULL) { tree zero = build_zero_cst (TREE_TYPE (type)); if (r == NULL) r = zero; if (i == NULL) i = zero; } /* Complex types have either COMPLEX_CST or COMPLEX_EXPR to represent creation of a complex value. */ if (TREE_CONSTANT (r) && TREE_CONSTANT (i)) { ctor = build_complex (type, r, i); TREE_OPERAND (*expr_p, 1) = ctor; } else { ctor = build2 (COMPLEX_EXPR, type, r, i); TREE_OPERAND (*expr_p, 1) = ctor; ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, rhs_predicate_for (TREE_OPERAND (*expr_p, 0)), fb_rvalue); } } break; case VECTOR_TYPE: { unsigned HOST_WIDE_INT ix; constructor_elt *ce; if (notify_temp_creation) return GS_OK; /* Go ahead and simplify constant constructors to VECTOR_CST. 
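E.g. a literal '{ 1, 2, 3, 4 }' of a vector type (say a v4si declared with __attribute__ ((vector_size (16)))) collapses into a single VECTOR_CST node.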
*/ if (TREE_CONSTANT (ctor)) { bool constant_p = true; tree value; /* Even when ctor is constant, it might contain non-*_CST elements, such as addresses or trapping values like 1.0/0.0 - 1.0/0.0. Such expressions don't belong in VECTOR_CST nodes. */ FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value) if (!CONSTANT_CLASS_P (value)) { constant_p = false; break; } if (constant_p) { TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts); break; } /* Don't reduce an initializer constant even if we can't make a VECTOR_CST. It won't do anything for us, and it'll prevent us from representing it as a single constant. */ if (initializer_constant_valid_p (ctor, type)) break; TREE_CONSTANT (ctor) = 0; } /* Vector types use CONSTRUCTOR all the way through gimple compilation as a general initializer. */ FOR_EACH_VEC_ELT (constructor_elt, elts, ix, ce) { enum gimplify_status tret; tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val, fb_rvalue); if (tret == GS_ERROR) ret = GS_ERROR; } if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0))) TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p); } break; default: /* So how did we get a CONSTRUCTOR for a scalar type? */ gcc_unreachable (); } if (ret == GS_ERROR) return GS_ERROR; else if (want_value) { *expr_p = object; return GS_OK; } else { /* If we have gimplified both sides of the initializer but have not emitted an assignment, do so now. */ if (*expr_p) { tree lhs = TREE_OPERAND (*expr_p, 0); tree rhs = TREE_OPERAND (*expr_p, 1); gimple init = gimple_build_assign (lhs, rhs); gimplify_seq_add_stmt (pre_p, init); *expr_p = NULL; } return GS_ALL_DONE; } } /* Given a pointer value OP0, return a simplified version of an indirection through OP0, or NULL_TREE if no simplification is possible. Note that the resulting type may be different from the type pointed to in the sense that it is still compatible from the langhooks point of view. */ tree gimple_fold_indirect_ref (tree t) { tree ptype = TREE_TYPE (t), type = TREE_TYPE (ptype); tree sub = t; tree subtype; STRIP_NOPS (sub); subtype = TREE_TYPE (sub); if (!POINTER_TYPE_P (subtype)) return NULL_TREE; if (TREE_CODE (sub) == ADDR_EXPR) { tree op = TREE_OPERAND (sub, 0); tree optype = TREE_TYPE (op); /* *&p => p */ if (useless_type_conversion_p (type, optype)) return op; /* *(foo *)&fooarray => fooarray[0] */ if (TREE_CODE (optype) == ARRAY_TYPE && TREE_CODE (TYPE_SIZE (TREE_TYPE (optype))) == INTEGER_CST && useless_type_conversion_p (type, TREE_TYPE (optype))) { tree type_domain = TYPE_DOMAIN (optype); tree min_val = size_zero_node; if (type_domain && TYPE_MIN_VALUE (type_domain)) min_val = TYPE_MIN_VALUE (type_domain); if (TREE_CODE (min_val) == INTEGER_CST) return build4 (ARRAY_REF, type, op, min_val, NULL_TREE, NULL_TREE); } /* *(foo *)&complexfoo => __real__ complexfoo */ else if (TREE_CODE (optype) == COMPLEX_TYPE && useless_type_conversion_p (type, TREE_TYPE (optype))) return fold_build1 (REALPART_EXPR, type, op); /* *(foo *)&vectorfoo => BIT_FIELD_REF<vectorfoo,...> */ else if (TREE_CODE (optype) == VECTOR_TYPE && useless_type_conversion_p (type, TREE_TYPE (optype))) { tree part_width = TYPE_SIZE (type); tree index = bitsize_int (0); return fold_build3 (BIT_FIELD_REF, type, op, part_width, index); } } /* *(p + CST) -> ... 
*/ if (TREE_CODE (sub) == POINTER_PLUS_EXPR && TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST) { tree addr = TREE_OPERAND (sub, 0); tree off = TREE_OPERAND (sub, 1); tree addrtype; STRIP_NOPS (addr); addrtype = TREE_TYPE (addr); /* ((foo*)&vectorfoo)[1] -> BIT_FIELD_REF<vectorfoo,...> */ if (TREE_CODE (addr) == ADDR_EXPR && TREE_CODE (TREE_TYPE (addrtype)) == VECTOR_TYPE && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (addrtype))) && host_integerp (off, 1)) { unsigned HOST_WIDE_INT offset = tree_low_cst (off, 1); tree part_width = TYPE_SIZE (type); unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0) / BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); if (offset / part_widthi <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype))) return fold_build3 (BIT_FIELD_REF, type, TREE_OPERAND (addr, 0), part_width, index); } /* ((foo*)&complexfoo)[1] -> __imag__ complexfoo */ if (TREE_CODE (addr) == ADDR_EXPR && TREE_CODE (TREE_TYPE (addrtype)) == COMPLEX_TYPE && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (addrtype)))) { tree size = TYPE_SIZE_UNIT (type); if (tree_int_cst_equal (size, off)) return fold_build1 (IMAGPART_EXPR, type, TREE_OPERAND (addr, 0)); } /* *(p + CST) -> MEM_REF <p, CST>. */ if (TREE_CODE (addr) != ADDR_EXPR || DECL_P (TREE_OPERAND (addr, 0))) return fold_build2 (MEM_REF, type, addr, build_int_cst_wide (ptype, TREE_INT_CST_LOW (off), TREE_INT_CST_HIGH (off))); } /* *(foo *)fooarrptr => (*fooarrptr)[0] */ if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (subtype)))) == INTEGER_CST && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (subtype)))) { tree type_domain; tree min_val = size_zero_node; tree osub = sub; sub = gimple_fold_indirect_ref (sub); if (! sub) sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub); type_domain = TYPE_DOMAIN (TREE_TYPE (sub)); if (type_domain && TYPE_MIN_VALUE (type_domain)) min_val = TYPE_MIN_VALUE (type_domain); if (TREE_CODE (min_val) == INTEGER_CST) return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE); } return NULL_TREE; } /* Given a pointer value OP0, return a simplified version of an indirection through OP0, or NULL_TREE if no simplification is possible. This may only be applied to a rhs of an expression. Note that the resulting type may be different from the type pointed to in the sense that it is still compatible from the langhooks point of view. */ static tree gimple_fold_indirect_ref_rhs (tree t) { return gimple_fold_indirect_ref (t); } /* Subroutine of gimplify_modify_expr to do simplifications of MODIFY_EXPRs based on the code of the RHS. We loop for as long as something changes. */ static enum gimplify_status gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p, gimple_seq *pre_p, gimple_seq *post_p, bool want_value) { enum gimplify_status ret = GS_UNHANDLED; bool changed; do { changed = false; switch (TREE_CODE (*from_p)) { case VAR_DECL: /* If we're assigning from a read-only variable initialized with a constructor, do the direct assignment from the constructor, but only if neither source nor target are volatile since this latter assignment might end up being done on a per-field basis. */ if (DECL_INITIAL (*from_p) && TREE_READONLY (*from_p) && !TREE_THIS_VOLATILE (*from_p) && !TREE_THIS_VOLATILE (*to_p) && TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR) { tree old_from = *from_p; enum gimplify_status subret; /* Move the constructor into the RHS. 
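(Sketch: given 'static const struct S init = { 1, 2 };' and the assignment 's = init;', we try gimplifying 's = (struct S) { 1, 2 };' instead.)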
*/ *from_p = unshare_expr (DECL_INITIAL (*from_p)); /* Let's see if gimplify_init_constructor will need to put it in memory. */ subret = gimplify_init_constructor (expr_p, NULL, NULL, false, true); if (subret == GS_ERROR) { /* If so, revert the change. */ *from_p = old_from; } else { ret = GS_OK; changed = true; } } break; case INDIRECT_REF: { /* If we have code like *(const A*)(A*)&x where the type of "x" is a (possibly cv-qualified variant of "A"), treat the entire expression as identical to "x". This kind of code arises in C++ when an object is bound to a const reference, and if "x" is a TARGET_EXPR we want to take advantage of the optimization below. */ bool volatile_p = TREE_THIS_VOLATILE (*from_p); tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0)); if (t) { if (TREE_THIS_VOLATILE (t) != volatile_p) { if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_declaration) t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p), build_fold_addr_expr (t)); if (REFERENCE_CLASS_P (t)) TREE_THIS_VOLATILE (t) = volatile_p; } *from_p = t; ret = GS_OK; changed = true; } break; } case TARGET_EXPR: { /* If we are initializing something from a TARGET_EXPR, strip the TARGET_EXPR and initialize it directly, if possible. This can't be done if the initializer is void, since that implies that the temporary is set in some non-trivial way. ??? What about code that pulls out the temp and uses it elsewhere? I think that such code never uses the TARGET_EXPR as an initializer. If I'm wrong, we'll die because the temp won't have any RTL. In that case, I guess we'll need to replace references somehow. */ tree init = TARGET_EXPR_INITIAL (*from_p); if (init && !VOID_TYPE_P (TREE_TYPE (init))) { *from_p = init; ret = GS_OK; changed = true; } } break; case COMPOUND_EXPR: /* Remove any COMPOUND_EXPR in the RHS so the following cases will be caught. */ gimplify_compound_expr (from_p, pre_p, true); ret = GS_OK; changed = true; break; case CONSTRUCTOR: /* If we already made some changes, let the front end have a crack at this before we break it down. */ if (ret != GS_UNHANDLED) break; /* If we're initializing from a CONSTRUCTOR, break this into individual MODIFY_EXPRs. */ return gimplify_init_constructor (expr_p, pre_p, post_p, want_value, false); case COND_EXPR: /* If we're assigning to a non-register type, push the assignment down into the branches. This is mandatory for ADDRESSABLE types, since we cannot generate temporaries for such, but it saves a copy in other cases as well. */ if (!is_gimple_reg_type (TREE_TYPE (*from_p))) { /* This code should mirror the code in gimplify_cond_expr. */ enum tree_code code = TREE_CODE (*expr_p); tree cond = *from_p; tree result = *to_p; ret = gimplify_expr (&result, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret != GS_ERROR) ret = GS_OK; if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node) TREE_OPERAND (cond, 1) = build2 (code, void_type_node, result, TREE_OPERAND (cond, 1)); if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node) TREE_OPERAND (cond, 2) = build2 (code, void_type_node, unshare_expr (result), TREE_OPERAND (cond, 2)); TREE_TYPE (cond) = void_type_node; recalculate_side_effects (cond); if (want_value) { gimplify_and_add (cond, pre_p); *expr_p = unshare_expr (result); } else *expr_p = cond; return ret; } break; case CALL_EXPR: /* For calls that return in memory, give *to_p as the CALL_EXPR's return slot so that we don't generate a temporary. 
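That is, for 'big = f ();' where the struct is returned in memory, f can construct its result directly in 'big' rather than in a temporary that we would then block-copy.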
*/ if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p) && aggregate_value_p (*from_p, *from_p)) { bool use_target; if (!(rhs_predicate_for (*to_p))(*from_p)) /* If we need a temporary, *to_p isn't accurate. */ use_target = false; /* It's OK to use the return slot directly unless it's an NRV. */ else if (TREE_CODE (*to_p) == RESULT_DECL && DECL_NAME (*to_p) == NULL_TREE && needs_to_live_in_memory (*to_p)) use_target = true; else if (is_gimple_reg_type (TREE_TYPE (*to_p)) || (DECL_P (*to_p) && DECL_REGISTER (*to_p))) /* Don't force regs into memory. */ use_target = false; else if (TREE_CODE (*expr_p) == INIT_EXPR) /* It's OK to use the target directly if it's being initialized. */ use_target = true; else if (variably_modified_type_p (TREE_TYPE (*to_p), NULL_TREE)) /* Always use the target and thus RSO for variable-sized types. GIMPLE cannot deal with a variable-sized assignment embedded in a call statement. */ use_target = true; else if (TREE_CODE (*to_p) != SSA_NAME && (!is_gimple_variable (*to_p) || needs_to_live_in_memory (*to_p))) /* Don't use the original target if it's already addressable; if its address escapes, and the called function uses the NRV optimization, a conforming program could see *to_p change before the called function returns; see c++/19317. When optimizing, the return_slot pass marks more functions as safe after we have escape info. */ use_target = false; else use_target = true; if (use_target) { CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1; mark_addressable (*to_p); } } break; case WITH_SIZE_EXPR: /* Likewise for calls that return an aggregate of non-constant size, since we would not be able to generate a temporary at all. */ if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR) { *from_p = TREE_OPERAND (*from_p, 0); /* We don't change ret in this case because the WITH_SIZE_EXPR might have been added in gimplify_modify_expr, so returning GS_OK would lead to an infinite loop. */ changed = true; } break; /* If we're initializing from a container, push the initialization inside it. */ case CLEANUP_POINT_EXPR: case BIND_EXPR: case STATEMENT_LIST: { tree wrap = *from_p; tree t; ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval, fb_lvalue); if (ret != GS_ERROR) ret = GS_OK; t = voidify_wrapper_expr (wrap, *expr_p); gcc_assert (t == *expr_p); if (want_value) { gimplify_and_add (wrap, pre_p); *expr_p = unshare_expr (*to_p); } else *expr_p = wrap; return GS_OK; } case COMPOUND_LITERAL_EXPR: { tree complit = TREE_OPERAND (*expr_p, 1); tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit); tree decl = DECL_EXPR_DECL (decl_s); tree init = DECL_INITIAL (decl); /* struct T x = (struct T) { 0, 1, 2 } can be optimized into struct T x = { 0, 1, 2 } if the address of the compound literal has never been taken. */ if (!TREE_ADDRESSABLE (complit) && !TREE_ADDRESSABLE (decl) && init) { *expr_p = copy_node (*expr_p); TREE_OPERAND (*expr_p, 1) = init; return GS_OK; } } default: break; } } while (changed); return ret; } /* Promote partial stores to COMPLEX variables to total stores. *EXPR_P is a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with DECL_GIMPLE_REG_P set. IMPORTANT NOTE: This promotion is performed by introducing a load of the other, unmodified part of the complex object just before the total store. As a consequence, if the object is still uninitialized, an undefined value will be loaded into a register, which may result in a spurious exception if the register is floating-point and the value happens to be a signaling NaN for example. 
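Concretely (a sketch): '__real__ c = x;' becomes roughly 'tmp = __imag__ c; c = COMPLEX_EXPR <x, tmp>;'.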
Then the fully-fledged complex operations lowering pass followed by a DCE pass are necessary in order to fix things up. */ static enum gimplify_status gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p, bool want_value) { enum tree_code code, ocode; tree lhs, rhs, new_rhs, other, realpart, imagpart; lhs = TREE_OPERAND (*expr_p, 0); rhs = TREE_OPERAND (*expr_p, 1); code = TREE_CODE (lhs); lhs = TREE_OPERAND (lhs, 0); ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR; other = build1 (ocode, TREE_TYPE (rhs), lhs); TREE_NO_WARNING (other) = 1; other = get_formal_tmp_var (other, pre_p); realpart = code == REALPART_EXPR ? rhs : other; imagpart = code == REALPART_EXPR ? other : rhs; if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart)) new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart); else new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart); gimplify_seq_add_stmt (pre_p, gimple_build_assign (lhs, new_rhs)); *expr_p = (want_value) ? rhs : NULL_TREE; return GS_ALL_DONE; } /* Gimplify the MODIFY_EXPR node pointed to by EXPR_P. modify_expr : varname '=' rhs | '*' ID '=' rhs PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. WANT_VALUE is nonzero iff we want to use the value of this expression in another expression. */ static enum gimplify_status gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, bool want_value) { tree *from_p = &TREE_OPERAND (*expr_p, 1); tree *to_p = &TREE_OPERAND (*expr_p, 0); enum gimplify_status ret = GS_UNHANDLED; gimple assign; location_t loc = EXPR_LOCATION (*expr_p); gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR || TREE_CODE (*expr_p) == INIT_EXPR); /* Trying to simplify a clobber using normal logic doesn't work, so handle it here. */ if (TREE_CLOBBER_P (*from_p)) { gcc_assert (!want_value && TREE_CODE (*to_p) == VAR_DECL); gimplify_seq_add_stmt (pre_p, gimple_build_assign (*to_p, *from_p)); *expr_p = NULL; return GS_ALL_DONE; } /* Insert pointer conversions required by the middle-end that are not required by the frontend. This fixes middle-end type checking for for example gcc.dg/redecl-6.c. */ if (POINTER_TYPE_P (TREE_TYPE (*to_p))) { STRIP_USELESS_TYPE_CONVERSION (*from_p); if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p))) *from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p); } /* See if any simplifications can be done based on what the RHS is. */ ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p, want_value); if (ret != GS_UNHANDLED) return ret; /* For zero sized types only gimplify the left hand side and right hand side as statements and throw away the assignment. Do this after gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable types properly. */ if (zero_sized_type (TREE_TYPE (*from_p)) && !want_value) { gimplify_stmt (from_p, pre_p); gimplify_stmt (to_p, pre_p); *expr_p = NULL_TREE; return GS_ALL_DONE; } /* If the value being copied is of variable width, compute the length of the copy into a WITH_SIZE_EXPR. Note that we need to do this before gimplifying any of the operands so that we can resolve any PLACEHOLDER_EXPRs in the size. Also note that the RTL expander uses the size of the expression to be copied, not of the destination, so that is what we must do here. 
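For example, assuming a copy between two objects of type 'char[n]', the RHS becomes roughly WITH_SIZE_EXPR <rhs, n>; the WITH_SIZE_EXPR handling further down then uses that size operand as the memcpy/memset length.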
*/ maybe_with_size_expr (from_p); ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret == GS_ERROR) return ret; /* As a special case, we have to temporarily allow for assignments with a CALL_EXPR on the RHS. Since in GIMPLE a function call is a toplevel statement, when gimplifying the GENERIC expression MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>. Instead, we need to create the tuple GIMPLE_CALL <a, foo>. To prevent gimplify_expr from trying to create a new temporary for foo's LHS, we tell it that it should only gimplify until it reaches the CALL_EXPR. On return from gimplify_expr, the newly created GIMPLE_CALL <foo> will be the last statement in *PRE_P and all we need to do here is set 'a' to be its LHS. */ ret = gimplify_expr (from_p, pre_p, post_p, rhs_predicate_for (*to_p), fb_rvalue); if (ret == GS_ERROR) return ret; /* Now see if the above changed *from_p to something we handle specially. */ ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p, want_value); if (ret != GS_UNHANDLED) return ret; /* If we've got a variable sized assignment between two lvalues (i.e. does not involve a call), then we can make things a bit more straightforward by converting the assignment to memcpy or memset. */ if (TREE_CODE (*from_p) == WITH_SIZE_EXPR) { tree from = TREE_OPERAND (*from_p, 0); tree size = TREE_OPERAND (*from_p, 1); if (TREE_CODE (from) == CONSTRUCTOR) return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p); if (is_gimple_addressable (from)) { *from_p = from; return gimplify_modify_expr_to_memcpy (expr_p, size, want_value, pre_p); } } /* Transform partial stores to non-addressable complex variables into total stores. This allows us to use real instead of virtual operands for these variables, which improves optimization. */ if ((TREE_CODE (*to_p) == REALPART_EXPR || TREE_CODE (*to_p) == IMAGPART_EXPR) && is_gimple_reg (TREE_OPERAND (*to_p, 0))) return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value); /* Try to alleviate the effects of the gimplification creating artificial temporaries (see for example is_gimple_reg_rhs) on the debug info. */ if (!gimplify_ctxp->into_ssa && TREE_CODE (*from_p) == VAR_DECL && DECL_IGNORED_P (*from_p) && DECL_P (*to_p) && !DECL_IGNORED_P (*to_p)) { if (!DECL_NAME (*from_p) && DECL_NAME (*to_p)) DECL_NAME (*from_p) = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p))); DECL_DEBUG_EXPR_IS_FROM (*from_p) = 1; SET_DECL_DEBUG_EXPR (*from_p, *to_p); } if (want_value && TREE_THIS_VOLATILE (*to_p)) *from_p = get_initialized_tmp_var (*from_p, pre_p, post_p); if (TREE_CODE (*from_p) == CALL_EXPR) { /* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL instead of a GIMPLE_ASSIGN. */ tree fnptrtype = TREE_TYPE (CALL_EXPR_FN (*from_p)); CALL_EXPR_FN (*from_p) = TREE_OPERAND (CALL_EXPR_FN (*from_p), 0); STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p)); assign = gimple_build_call_from_tree (*from_p); gimple_call_set_fntype (assign, TREE_TYPE (fnptrtype)); if (!gimple_call_noreturn_p (assign)) gimple_call_set_lhs (assign, *to_p); } else { assign = gimple_build_assign (*to_p, *from_p); gimple_set_location (assign, EXPR_LOCATION (*expr_p)); } gimplify_seq_add_stmt (pre_p, assign); if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p)) { /* If we've somehow already got an SSA_NAME on the LHS, then we've probably modified it twice. Not good. 
*/ gcc_assert (TREE_CODE (*to_p) != SSA_NAME); *to_p = make_ssa_name (*to_p, assign); gimple_set_lhs (assign, *to_p); } if (want_value) { *expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p); return GS_OK; } else *expr_p = NULL; return GS_ALL_DONE; } /* Gimplify a comparison between two variable-sized objects. Do this with a call to BUILT_IN_MEMCMP. */ static enum gimplify_status gimplify_variable_sized_compare (tree *expr_p) { location_t loc = EXPR_LOCATION (*expr_p); tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree t, arg, dest, src, expr; arg = TYPE_SIZE_UNIT (TREE_TYPE (op0)); arg = unshare_expr (arg); arg = SUBSTITUTE_PLACEHOLDER_IN_EXPR (arg, op0); src = build_fold_addr_expr_loc (loc, op1); dest = build_fold_addr_expr_loc (loc, op0); t = builtin_decl_implicit (BUILT_IN_MEMCMP); t = build_call_expr_loc (loc, t, 3, dest, src, arg); expr = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node); SET_EXPR_LOCATION (expr, loc); *expr_p = expr; return GS_OK; } /* Gimplify a comparison between two aggregate objects of integral scalar mode as a comparison between the bitwise equivalent scalar values. */ static enum gimplify_status gimplify_scalar_mode_aggregate_compare (tree *expr_p) { location_t loc = EXPR_LOCATION (*expr_p); tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree type = TREE_TYPE (op0); tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1); op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op0); op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op1); *expr_p = fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1); return GS_OK; } /* Gimplify an expression sequence. This function gimplifies each expression and rewrites the original expression with the last expression of the sequence in GIMPLE form. PRE_P points to the list where the side effects for all the expressions in the sequence will be emitted. WANT_VALUE is true when the result of the last COMPOUND_EXPR is used. */ static enum gimplify_status gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value) { tree t = *expr_p; do { tree *sub_p = &TREE_OPERAND (t, 0); if (TREE_CODE (*sub_p) == COMPOUND_EXPR) gimplify_compound_expr (sub_p, pre_p, false); else gimplify_stmt (sub_p, pre_p); t = TREE_OPERAND (t, 1); } while (TREE_CODE (t) == COMPOUND_EXPR); *expr_p = t; if (want_value) return GS_OK; else { gimplify_stmt (expr_p, pre_p); return GS_ALL_DONE; } } /* Gimplify a SAVE_EXPR node. EXPR_P points to the expression to gimplify. After gimplification, EXPR_P will point to a new temporary that holds the original value of the SAVE_EXPR node. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p) { enum gimplify_status ret = GS_ALL_DONE; tree val; gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR); val = TREE_OPERAND (*expr_p, 0); /* If the SAVE_EXPR has not been resolved, then evaluate it once. */ if (!SAVE_EXPR_RESOLVED_P (*expr_p)) { /* The operand may be a void-valued expression such as SAVE_EXPRs generated by the Java frontend for class initialization. It is being executed only for its side-effects. 
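In the usual value-producing case, e.g. SAVE_EXPR <a + b>, the operand is instead evaluated once into a temporary below, and every later occurrence of this SAVE_EXPR node reuses that temporary.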
*/ if (TREE_TYPE (val) == void_type_node) { ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_stmt, fb_none); val = NULL; } else val = get_initialized_tmp_var (val, pre_p, post_p); TREE_OPERAND (*expr_p, 0) = val; SAVE_EXPR_RESOLVED_P (*expr_p) = 1; } *expr_p = val; return ret; } /* Rewrite the ADDR_EXPR node pointed to by EXPR_P unary_expr : ... | '&' varname ... PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. */ static enum gimplify_status gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p) { tree expr = *expr_p; tree op0 = TREE_OPERAND (expr, 0); enum gimplify_status ret; location_t loc = EXPR_LOCATION (*expr_p); switch (TREE_CODE (op0)) { case INDIRECT_REF: do_indirect_ref: /* Check if we are dealing with an expression of the form '&*ptr'. While the front end folds away '&*ptr' into 'ptr', these expressions may be generated internally by the compiler (e.g., builtins like __builtin_va_end). */ /* Caution: the silent array decomposition semantics we allow for ADDR_EXPR means we can't always discard the pair. */ /* Gimplification of the ADDR_EXPR operand may drop cv-qualification conversions, so make sure we add them if needed. */ { tree op00 = TREE_OPERAND (op0, 0); tree t_expr = TREE_TYPE (expr); tree t_op00 = TREE_TYPE (op00); if (!useless_type_conversion_p (t_expr, t_op00)) op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00); *expr_p = op00; ret = GS_OK; } break; case VIEW_CONVERT_EXPR: /* Take the address of our operand and then convert it to the type of this ADDR_EXPR. ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at all clear. The impact of this transformation is even less clear. */ /* If the operand is a useless conversion, look through it. Doing so guarantees that the ADDR_EXPR and its operand will remain of the same type. */ if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0))) op0 = TREE_OPERAND (op0, 0); *expr_p = fold_convert_loc (loc, TREE_TYPE (expr), build_fold_addr_expr_loc (loc, TREE_OPERAND (op0, 0))); ret = GS_OK; break; default: /* We use fb_either here because the C frontend sometimes takes the address of a call that returns a struct; see gcc.dg/c99-array-lval-1.c. The gimplifier will correctly make the implied temporary explicit. */ /* Make the operand addressable. */ ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p, is_gimple_addressable, fb_either); if (ret == GS_ERROR) break; /* Then mark it. Beware that it may not be possible to do so directly if a temporary has been created by the gimplification. */ prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p); op0 = TREE_OPERAND (expr, 0); /* For various reasons, the gimplification of the expression may have made a new INDIRECT_REF. */ if (TREE_CODE (op0) == INDIRECT_REF) goto do_indirect_ref; mark_addressable (TREE_OPERAND (expr, 0)); /* The FEs may end up building ADDR_EXPRs early on a decl with an incomplete type. Re-build ADDR_EXPRs in canonical form here. */ if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr)))) *expr_p = build_fold_addr_expr (op0); /* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly. */ recompute_tree_invariant_for_addr_expr (*expr_p); /* If we re-built the ADDR_EXPR add a conversion to the original type if required. 
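(The rebuilt ADDR_EXPR takes its type from OP0, which may differ from the type the front end gave the original expression, e.g. when the decl's type was still incomplete at that point.)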
*/ if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p))) *expr_p = fold_convert (TREE_TYPE (expr), *expr_p); break; } return ret; } /* Gimplify the operands of an ASM_EXPR. Input operands should be a gimple value; output operands should be a gimple lvalue. */ static enum gimplify_status gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p) { tree expr; int noutputs; const char **oconstraints; int i; tree link; const char *constraint; bool allows_mem, allows_reg, is_inout; enum gimplify_status ret, tret; gimple stmt; VEC(tree, gc) *inputs; VEC(tree, gc) *outputs; VEC(tree, gc) *clobbers; VEC(tree, gc) *labels; tree link_next; expr = *expr_p; noutputs = list_length (ASM_OUTPUTS (expr)); oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *)); inputs = outputs = clobbers = labels = NULL; ret = GS_ALL_DONE; link_next = NULL_TREE; for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next) { bool ok; size_t constraint_len; link_next = TREE_CHAIN (link); oconstraints[i] = constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link))); constraint_len = strlen (constraint); if (constraint_len == 0) continue; ok = parse_output_constraint (&constraint, i, 0, 0, &allows_mem, &allows_reg, &is_inout); if (!ok) { ret = GS_ERROR; is_inout = false; } if (!allows_reg && allows_mem) mark_addressable (TREE_VALUE (link)); tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p, is_inout ? is_gimple_min_lval : is_gimple_lvalue, fb_lvalue | fb_mayfail); if (tret == GS_ERROR) { error ("invalid lvalue in asm output %d", i); ret = tret; } VEC_safe_push (tree, gc, outputs, link); TREE_CHAIN (link) = NULL_TREE; if (is_inout) { /* An input/output operand. To give the optimizers more flexibility, split it into separate input and output operands. */ tree input; char buf[10]; /* Turn the in/out constraint into an output constraint. */ char *p = xstrdup (constraint); p[0] = '='; TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p); /* And add a matching input constraint. */ if (allows_reg) { sprintf (buf, "%d", i); /* If there are multiple alternatives in the constraint, handle each of them individually. Those that allow register will be replaced with operand number, the others will stay unchanged. 
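For example, an in/out operand 0 with constraint "+r,m" is split into the output "=r,m" and a matching input "0,m": the register alternative becomes the operand number, the memory alternative is copied as-is.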
*/ if (strchr (p, ',') != NULL) { size_t len = 0, buflen = strlen (buf); char *beg, *end, *str, *dst; for (beg = p + 1;;) { end = strchr (beg, ','); if (end == NULL) end = strchr (beg, '\0'); if ((size_t) (end - beg) < buflen) len += buflen + 1; else len += end - beg + 1; if (*end) beg = end + 1; else break; } str = (char *) alloca (len); for (beg = p + 1, dst = str;;) { const char *tem; bool mem_p, reg_p, inout_p; end = strchr (beg, ','); if (end) *end = '\0'; beg[-1] = '='; tem = beg - 1; parse_output_constraint (&tem, i, 0, 0, &mem_p, &reg_p, &inout_p); if (dst != str) *dst++ = ','; if (reg_p) { memcpy (dst, buf, buflen); dst += buflen; } else { if (end) len = end - beg; else len = strlen (beg); memcpy (dst, beg, len); dst += len; } if (end) beg = end + 1; else break; } *dst = '\0'; input = build_string (dst - str, str); } else input = build_string (strlen (buf), buf); } else input = build_string (constraint_len - 1, constraint + 1); free (p); input = build_tree_list (build_tree_list (NULL_TREE, input), unshare_expr (TREE_VALUE (link))); ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input); } } link_next = NULL_TREE; for (link = ASM_INPUTS (expr); link; ++i, link = link_next) { link_next = TREE_CHAIN (link); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link))); parse_input_constraint (&constraint, 0, 0, noutputs, 0, oconstraints, &allows_mem, &allows_reg); /* If we can't make copies, we can only accept memory. */ if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link)))) { if (allows_mem) allows_reg = 0; else { error ("impossible constraint in %<asm%>"); error ("non-memory input %d must stay in memory", i); return GS_ERROR; } } /* If the operand is a memory input, it should be an lvalue. */ if (!allows_reg && allows_mem) { tree inputv = TREE_VALUE (link); STRIP_NOPS (inputv); if (TREE_CODE (inputv) == PREDECREMENT_EXPR || TREE_CODE (inputv) == PREINCREMENT_EXPR || TREE_CODE (inputv) == POSTDECREMENT_EXPR || TREE_CODE (inputv) == POSTINCREMENT_EXPR) TREE_VALUE (link) = error_mark_node; tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p, is_gimple_lvalue, fb_lvalue | fb_mayfail); mark_addressable (TREE_VALUE (link)); if (tret == GS_ERROR) { if (EXPR_HAS_LOCATION (TREE_VALUE (link))) input_location = EXPR_LOCATION (TREE_VALUE (link)); error ("memory input %d is not directly addressable", i); ret = tret; } } else { tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p, is_gimple_asm_val, fb_rvalue); if (tret == GS_ERROR) ret = tret; } TREE_CHAIN (link) = NULL_TREE; VEC_safe_push (tree, gc, inputs, link); } for (link = ASM_CLOBBERS (expr); link; ++i, link = TREE_CHAIN (link)) VEC_safe_push (tree, gc, clobbers, link); for (link = ASM_LABELS (expr); link; ++i, link = TREE_CHAIN (link)) VEC_safe_push (tree, gc, labels, link); /* Do not add ASMs with errors to the gimple IL stream. */ if (ret != GS_ERROR) { stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)), inputs, outputs, clobbers, labels); gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr)); gimple_asm_set_input (stmt, ASM_INPUT_P (expr)); gimplify_seq_add_stmt (pre_p, stmt); } return ret; } /* Gimplify a CLEANUP_POINT_EXPR. Currently this works by adding GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we return to this function. FIXME should we complexify the prequeue handling instead? Or use flags for all the cleanups and let the optimizer tighten them up? 
The current code seems pretty fragile; it will break on a cleanup within any non-conditional nesting. But any such nesting would be broken, anyway; we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct and continues out of it. We can do that at the RTL level, though, so having an optimizer to tighten up try/finally regions would be a Good Thing. */ static enum gimplify_status gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p) { gimple_stmt_iterator iter; gimple_seq body_sequence = NULL; tree temp = voidify_wrapper_expr (*expr_p, NULL); /* We only care about the number of conditions between the innermost CLEANUP_POINT_EXPR and the cleanup. So save and reset the count and any cleanups collected outside the CLEANUP_POINT_EXPR. */ int old_conds = gimplify_ctxp->conditions; gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups; bool old_in_cleanup_point_expr = gimplify_ctxp->in_cleanup_point_expr; gimplify_ctxp->conditions = 0; gimplify_ctxp->conditional_cleanups = NULL; gimplify_ctxp->in_cleanup_point_expr = true; gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence); gimplify_ctxp->conditions = old_conds; gimplify_ctxp->conditional_cleanups = old_cleanups; gimplify_ctxp->in_cleanup_point_expr = old_in_cleanup_point_expr; for (iter = gsi_start (body_sequence); !gsi_end_p (iter); ) { gimple wce = gsi_stmt (iter); if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR) { if (gsi_one_before_end_p (iter)) { /* Note that gsi_insert_seq_before and gsi_remove do not scan operands, unlike some other sequence mutators. */ if (!gimple_wce_cleanup_eh_only (wce)) gsi_insert_seq_before_without_update (&iter, gimple_wce_cleanup (wce), GSI_SAME_STMT); gsi_remove (&iter, true); break; } else { gimple gtry; gimple_seq seq; enum gimple_try_flags kind; if (gimple_wce_cleanup_eh_only (wce)) kind = GIMPLE_TRY_CATCH; else kind = GIMPLE_TRY_FINALLY; seq = gsi_split_seq_after (iter); gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind); /* Do not use gsi_replace here, as it may scan operands. We want to do a simple structural modification only. */ *gsi_stmt_ptr (&iter) = gtry; iter = gsi_start (seq); } } else gsi_next (&iter); } gimplify_seq_add_seq (pre_p, body_sequence); if (temp) { *expr_p = temp; return GS_OK; } else { *expr_p = NULL; return GS_ALL_DONE; } } /* Insert a cleanup marker for gimplify_cleanup_point_expr. CLEANUP is the cleanup action required. EH_ONLY is true if the cleanup should only be executed if an exception is thrown, not on normal exit. */ static void gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p) { gimple wce; gimple_seq cleanup_stmts = NULL; /* Errors can result in improperly nested cleanups. Which results in confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR. */ if (seen_error ()) return; if (gimple_conditional_context ()) { /* If we're in a conditional context, this is more complex. We only want to run the cleanup if we actually ran the initialization that necessitates it, but we want to run it after the end of the conditional context. So we wrap the try/finally around the condition and use a flag to determine whether or not to actually run the destructor. Thus test ? 
f(A()) : 0 becomes (approximately) flag = 0; try { if (test) { A::A(temp); flag = 1; val = f(temp); } else { val = 0; } } finally { if (flag) A::~A(temp); } val */ tree flag = create_tmp_var (boolean_type_node, "cleanup"); gimple ffalse = gimple_build_assign (flag, boolean_false_node); gimple ftrue = gimple_build_assign (flag, boolean_true_node); cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL); gimplify_stmt (&cleanup, &cleanup_stmts); wce = gimple_build_wce (cleanup_stmts); gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse); gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce); gimplify_seq_add_stmt (pre_p, ftrue); /* Because of this manipulation, and the EH edges that jump threading cannot redirect, the temporary (VAR) will appear to be used uninitialized. Don't warn. */ TREE_NO_WARNING (var) = 1; } else { gimplify_stmt (&cleanup, &cleanup_stmts); wce = gimple_build_wce (cleanup_stmts); gimple_wce_set_cleanup_eh_only (wce, eh_only); gimplify_seq_add_stmt (pre_p, wce); } } /* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR. */ static enum gimplify_status gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p) { tree targ = *expr_p; tree temp = TARGET_EXPR_SLOT (targ); tree init = TARGET_EXPR_INITIAL (targ); enum gimplify_status ret; if (init) { tree cleanup = NULL_TREE; /* TARGET_EXPR temps aren't part of the enclosing block, so add it to the temps list. Handle also variable length TARGET_EXPRs. */ if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST) { if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp))) gimplify_type_sizes (TREE_TYPE (temp), pre_p); gimplify_vla_decl (temp, pre_p); } else gimple_add_tmp_var (temp); /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the expression is supposed to initialize the slot. */ if (VOID_TYPE_P (TREE_TYPE (init))) ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none); else { tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init); init = init_expr; ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none); init = NULL; ggc_free (init_expr); } if (ret == GS_ERROR) { /* PR c++/28266 Make sure this is expanded only once. */ TARGET_EXPR_INITIAL (targ) = NULL_TREE; return GS_ERROR; } if (init) gimplify_and_add (init, pre_p); /* If needed, push the cleanup for the temp. */ if (TARGET_EXPR_CLEANUP (targ)) { if (CLEANUP_EH_ONLY (targ)) gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ), CLEANUP_EH_ONLY (targ), pre_p); else cleanup = TARGET_EXPR_CLEANUP (targ); } /* Add a clobber for the temporary going out of scope, like gimplify_bind_expr. */ if (gimplify_ctxp->in_cleanup_point_expr && needs_to_live_in_memory (temp)) { tree clobber = build_constructor (TREE_TYPE (temp), NULL); TREE_THIS_VOLATILE (clobber) = true; clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber); if (cleanup) cleanup = build2 (COMPOUND_EXPR, void_type_node, cleanup, clobber); else cleanup = clobber; } if (cleanup) gimple_push_cleanup (temp, cleanup, false, pre_p); /* Only expand this once. */ TREE_OPERAND (targ, 3) = init; TARGET_EXPR_INITIAL (targ) = NULL_TREE; } else /* We should have expanded this before. */ gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp)); *expr_p = temp; return GS_OK; } /* Gimplification of expression trees. */ /* Gimplify an expression which appears at statement context. The corresponding GIMPLE statements are added to *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. 
Return true if we actually added a statement to the queue. */ bool gimplify_stmt (tree *stmt_p, gimple_seq *seq_p) { gimple_seq_node last; if (!*seq_p) *seq_p = gimple_seq_alloc (); last = gimple_seq_last (*seq_p); gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none); return last != gimple_seq_last (*seq_p); } /* Convert the GENERIC expression tree *EXPR_P to GIMPLE. If the expression produces a value to be used as an operand inside a GIMPLE statement, the value will be stored back in *EXPR_P. This value will be a tree of class tcc_declaration, tcc_constant, tcc_reference or an SSA_NAME. The corresponding sequence of GIMPLE statements is emitted in PRE_P and POST_P. Additionally, this process may overwrite parts of the input expression during gimplification. Ideally, it should be possible to do non-destructive gimplification. EXPR_P points to the GENERIC expression to convert to GIMPLE. If the expression needs to evaluate to a value to be used as an operand in a GIMPLE statement, this value will be stored in *EXPR_P on exit. This happens when the caller specifies one of fb_lvalue or fb_rvalue fallback flags. PRE_P will contain the sequence of GIMPLE statements corresponding to the evaluation of EXPR and all the side-effects that must be executed before the main expression. On exit, the last statement of PRE_P is the core statement being gimplified. For instance, when gimplifying 'if (++a)' the last statement in PRE_P will be 'if (t.1)' where t.1 is the result of pre-incrementing 'a'. POST_P will contain the sequence of GIMPLE statements corresponding to the evaluation of all the side-effects that must be executed after the main expression. If this is NULL, the post side-effects are stored at the end of PRE_P. The reason why the output is split in two is to handle post side-effects explicitly. In some cases, an expression may have inner and outer post side-effects which need to be emitted in an order different from the one given by the recursive traversal. For instance, for the expression (*p--)++ the post side-effects of '--' must actually occur *after* the post side-effects of '++'. However, gimplification will first visit the inner expression, so if a separate POST sequence was not used, the resulting sequence would be: 1 t.1 = *p 2 p = p - 1 3 t.2 = t.1 + 1 4 *p = t.2 However, the post-decrement operation in line #2 must not be evaluated until after the store to *p at line #4, so the correct sequence should be: 1 t.1 = *p 2 t.2 = t.1 + 1 3 *p = t.2 4 p = p - 1 So, by specifying a separate post queue, it is possible to emit the post side-effects in the correct order. If POST_P is NULL, an internal queue will be used. Before returning to the caller, the sequence POST_P is appended to the main output sequence PRE_P. GIMPLE_TEST_F points to a function that takes a tree T and returns nonzero if T is in the GIMPLE form requested by the caller. The GIMPLE predicates are in gimple.c. FALLBACK tells the function what sort of a temporary we want if gimplification cannot produce an expression that complies with GIMPLE_TEST_F. fb_none means that no temporary should be generated fb_rvalue means that an rvalue is OK to generate fb_lvalue means that an lvalue is OK to generate fb_either means that either is OK, but an lvalue is preferable. fb_mayfail means that gimplification may fail (in which case GS_ERROR will be returned) The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until EXPR is completely gimplified or an error occurs. 
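As a short illustration (the temporary names are made up): gimplifying 'a + b * c' with GIMPLE_TEST_F == is_gimple_val and FALLBACK == fb_rvalue emits 't1 = b * c; t2 = a + t1' into PRE_P and stores 't2' back into *EXPR_P.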
*/ enum gimplify_status gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, bool (*gimple_test_f) (tree), fallback_t fallback) { tree tmp; gimple_seq internal_pre = NULL; gimple_seq internal_post = NULL; tree save_expr; bool is_statement; location_t saved_location; enum gimplify_status ret; gimple_stmt_iterator pre_last_gsi, post_last_gsi; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* If we are gimplifying a top-level statement, PRE_P must be valid. */ is_statement = gimple_test_f == is_gimple_stmt; if (is_statement) gcc_assert (pre_p); /* Consistency checks. */ if (gimple_test_f == is_gimple_reg) gcc_assert (fallback & (fb_rvalue | fb_lvalue)); else if (gimple_test_f == is_gimple_val || gimple_test_f == is_gimple_call_addr || gimple_test_f == is_gimple_condexpr || gimple_test_f == is_gimple_mem_rhs || gimple_test_f == is_gimple_mem_rhs_or_call || gimple_test_f == is_gimple_reg_rhs || gimple_test_f == is_gimple_reg_rhs_or_call || gimple_test_f == is_gimple_asm_val || gimple_test_f == is_gimple_mem_ref_addr) gcc_assert (fallback & fb_rvalue); else if (gimple_test_f == is_gimple_min_lval || gimple_test_f == is_gimple_lvalue) gcc_assert (fallback & fb_lvalue); else if (gimple_test_f == is_gimple_addressable) gcc_assert (fallback & fb_either); else if (gimple_test_f == is_gimple_stmt) gcc_assert (fallback == fb_none); else { /* We should have recognized the GIMPLE_TEST_F predicate to know what kind of fallback to use in case a temporary is needed to hold the value or address of *EXPR_P. */ gcc_unreachable (); } /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; /* Remember the last statements added to PRE_P and POST_P. Every new statement added by the gimplification helpers needs to be annotated with location information. To centralize the responsibility, we remember the last statement that had been added to both queues before gimplifying *EXPR_P. If gimplification produces new statements in PRE_P and POST_P, those statements will be annotated with the same location information as *EXPR_P. */ pre_last_gsi = gsi_last (*pre_p); post_last_gsi = gsi_last (*post_p); saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. */ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (save_expr == error_mark_node || (TREE_TYPE (save_expr) && TREE_TYPE (save_expr) == error_mark_node)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. */ ret = ((enum gimplify_status) lang_hooks.gimplify_expr (expr_p, pre_p, post_p)); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; /* Make sure that all the cases set 'ret' appropriately. */ ret = GS_UNHANDLED; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. 
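For the self-modifying expressions handled first, e.g. 'a++' used for its value, gimplify_self_mod_expr roughly yields the old value of 'a' and queues 'a = a + 1' as a post side-effect.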
*/ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, fallback); /* C99 code may assign to an array in a structure value of a conditional expression, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); mark_addressable (*expr_p); ret = GS_OK; } break; #if 1 /* Modula-3: This gets converted fairly early, in tree-nested.c. */ case STATIC_CHAIN_EXPR: ret = GS_ALL_DONE; break; #endif case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); /* C99 code may assign to an array in a structure returned from a function, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); mark_addressable (*expr_p); ret = GS_OK; } break; case TREE_LIST: gcc_unreachable (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case COMPOUND_LITERAL_EXPR: ret = gimplify_compound_literal_expr (expr_p, pre_p); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: { /* Preserve the original type of the expression and the source location of the outer expression. */ tree org_type = TREE_TYPE (*expr_p); *expr_p = gimple_boolify (*expr_p); *expr_p = build3_loc (input_location, COND_EXPR, org_type, *expr_p, fold_convert_loc (input_location, org_type, boolean_true_node), fold_convert_loc (input_location, org_type, boolean_false_node)); ret = GS_OK; break; } case TRUTH_NOT_EXPR: { tree type = TREE_TYPE (*expr_p); /* The parsers are careful to generate TRUTH_NOT_EXPR only with operands that are always zero or one. We do not fold here but handle the only interesting case manually, as fold may re-introduce the TRUTH_NOT_EXPR. */ *expr_p = gimple_boolify (*expr_p); if (TYPE_PRECISION (TREE_TYPE (*expr_p)) == 1) *expr_p = build1_loc (input_location, BIT_NOT_EXPR, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0)); else *expr_p = build2_loc (input_location, BIT_XOR_EXPR, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), build_int_cst (TREE_TYPE (*expr_p), 1)); if (!useless_type_conversion_p (type, TREE_TYPE (*expr_p))) *expr_p = fold_convert_loc (input_location, type, *expr_p); ret = GS_OK; break; } case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; CASE_CONVERT: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); ret = GS_OK; break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: /* unary_expr: ... | '(' cast ')' val | ... 
*/ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: { bool volatilep = TREE_THIS_VOLATILE (*expr_p); bool notrap = TREE_THIS_NOTRAP (*expr_p); tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0)); *expr_p = fold_indirect_ref_loc (input_location, *expr_p); if (*expr_p != save_expr) { ret = GS_OK; break; } ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); if (ret == GS_ERROR) break; recalculate_side_effects (*expr_p); *expr_p = fold_build2_loc (input_location, MEM_REF, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), build_int_cst (saved_ptr_type, 0)); TREE_THIS_VOLATILE (*expr_p) = volatilep; TREE_THIS_NOTRAP (*expr_p) = notrap; ret = GS_OK; break; } /* We arrive here through the various re-gimplification paths. */ case MEM_REF: /* First try re-folding the whole thing. */ tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), TREE_OPERAND (*expr_p, 1)); if (tmp) { *expr_p = tmp; recalculate_side_effects (*expr_p); ret = GS_OK; break; } /* Avoid re-gimplifying the address operand if it is already in suitable form. Re-gimplifying would mark the address operand addressable. Always gimplify when not in SSA form as we still may have to gimplify decls with value-exprs. */ if (!gimplify_ctxp || !gimplify_ctxp->into_ssa || !is_gimple_mem_ref_addr (TREE_OPERAND (*expr_p, 0))) { ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_mem_ref_addr, fb_rvalue); if (ret == GS_ERROR) break; } recalculate_side_effects (*expr_p); ret = GS_ALL_DONE; break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case FIXED_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: ret = GS_ALL_DONE; break; case CONST_DECL: /* If we require an lvalue, such as for ADDR_EXPR, retain the CONST_DECL node. Otherwise the decl is replaceable by its value. */ /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */ if (fallback & fb_lvalue) ret = GS_ALL_DONE; else { *expr_p = DECL_INITIAL (*expr_p); ret = GS_OK; } break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p, pre_p); break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not a LABEL_DECL, then it is a computed jump and the target needs to be gimplified. */ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) { ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); if (ret == GS_ERROR) break; } gimplify_seq_add_stmt (pre_p, gimple_build_goto (GOTO_DESTINATION (*expr_p))); ret = GS_ALL_DONE; break; case PREDICT_EXPR: gimplify_seq_add_stmt (pre_p, gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p), PREDICT_EXPR_OUTCOME (*expr_p))); ret = GS_ALL_DONE; break; case LABEL_EXPR: ret = GS_ALL_DONE; gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) == current_function_decl); gimplify_seq_add_stmt (pre_p, gimple_build_label (LABEL_EXPR_LABEL (*expr_p))); break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p, pre_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. 
But if we're just elaborating this for side effects, just gimplify any element that has side-effects. */ if (fallback == fb_none) { unsigned HOST_WIDE_INT ix; tree val; tree temp = NULL_TREE; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val) if (TREE_SIDE_EFFECTS (val)) append_to_statement_list (val, &temp); *expr_p = temp; ret = temp ? GS_OK : GS_ALL_DONE; } /* C99 code may assign to an array in a constructed structure or union, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ else if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); mark_addressable (*expr_p); ret = GS_OK; } else ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. */ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. */ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, MIN (r1, r2)); } break; case TARGET_MEM_REF: { enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE; if (TMR_BASE (*expr_p)) r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p, post_p, is_gimple_mem_ref_addr, fb_either); if (TMR_INDEX (*expr_p)) r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); if (TMR_INDEX2 (*expr_p)) r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); /* TMR_STEP and TMR_OFFSET are always integer constants. */ ret = MIN (r0, r1); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. */ gcc_unreachable (); case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: { gimple_seq eval, cleanup; gimple try_; eval = cleanup = NULL; gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval); gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup); /* Don't create bogus GIMPLE_TRY with empty cleanup. */ if (gimple_seq_empty_p (cleanup)) { gimple_seq_add_seq (pre_p, eval); ret = GS_ALL_DONE; break; } try_ = gimple_build_try (eval, cleanup, TREE_CODE (*expr_p) == TRY_FINALLY_EXPR ?
GIMPLE_TRY_FINALLY : GIMPLE_TRY_CATCH); if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR) gimple_try_set_catch_is_cleanup (try_, TRY_CATCH_IS_CLEANUP (*expr_p)); gimplify_seq_add_stmt (pre_p, try_); ret = GS_ALL_DONE; break; } case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: { gimple c; gimple_seq handler = NULL; gimplify_and_add (CATCH_BODY (*expr_p), &handler); c = gimple_build_catch (CATCH_TYPES (*expr_p), handler); gimplify_seq_add_stmt (pre_p, c); ret = GS_ALL_DONE; break; } case EH_FILTER_EXPR: { gimple ehf; gimple_seq failure = NULL; gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure); ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure); gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p)); gimplify_seq_add_stmt (pre_p, ehf); ret = GS_ALL_DONE; break; } case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); TREE_SIDE_EFFECTS (*expr_p) = 0; ret = MIN (r0, r1); } break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p, pre_p); break; case WITH_SIZE_EXPR: { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p == &internal_post ? NULL : post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = GS_ALL_DONE; } break; case VAR_DECL: case PARM_DECL: ret = gimplify_var_or_parm_decl (expr_p); break; case RESULT_DECL: ret = GS_ALL_DONE; break; case SSA_NAME: /* Allow callbacks into the gimplifier during optimization. */ ret = GS_ALL_DONE; break; case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: { tree orig_type = TREE_TYPE (*expr_p); tree new_type, xop0, xop1; *expr_p = gimple_boolify (*expr_p); new_type = TREE_TYPE (*expr_p); if (!useless_type_conversion_p (orig_type, new_type)) { *expr_p = fold_convert_loc (input_location, orig_type, *expr_p); ret = GS_OK; break; } /* Boolified binary truth expressions are semantically equivalent to bitwise binary expressions. Canonicalize them to the bitwise variant. */ switch (TREE_CODE (*expr_p)) { case TRUTH_AND_EXPR: TREE_SET_CODE (*expr_p, BIT_AND_EXPR); break; case TRUTH_OR_EXPR: TREE_SET_CODE (*expr_p, BIT_IOR_EXPR); break; case TRUTH_XOR_EXPR: TREE_SET_CODE (*expr_p, BIT_XOR_EXPR); break; default: break; } /* Now make sure that operands have compatible type to expression's new_type. */ xop0 = TREE_OPERAND (*expr_p, 0); xop1 = TREE_OPERAND (*expr_p, 1); if (!useless_type_conversion_p (new_type, TREE_TYPE (xop0))) TREE_OPERAND (*expr_p, 0) = fold_convert_loc (input_location, new_type, xop0); if (!useless_type_conversion_p (new_type, TREE_TYPE (xop1))) TREE_OPERAND (*expr_p, 1) = fold_convert_loc (input_location, new_type, xop1); /* Continue classified as tcc_binary. */ goto expr_2; } case FMA_EXPR: case VEC_PERM_EXPR: /* Classified as tcc_expression. 
*/ goto expr_3; case POINTER_PLUS_EXPR: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, r1); /* Convert &X + CST to invariant &MEM[&X, CST]. Do this after gimplifying operands - this is similar to how it would be folding all gimplified stmts on creation to have them canonicalized, which is what we eventually should do anyway. */ if (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST && is_gimple_min_invariant (TREE_OPERAND (*expr_p, 0))) { *expr_p = build_fold_addr_expr_with_type_loc (input_location, fold_build2 (MEM_REF, TREE_TYPE (TREE_TYPE (*expr_p)), TREE_OPERAND (*expr_p, 0), fold_convert (ptr_type_node, TREE_OPERAND (*expr_p, 1))), TREE_TYPE (*expr_p)); ret = MIN (ret, GS_OK); } break; } default: switch (TREE_CODE_CLASS (TREE_CODE (*expr_p))) { case tcc_comparison: /* Handle comparison of objects of non scalar mode aggregates with a call to memcmp. It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. Compare scalar mode aggregates as scalar mode values. Using memcmp for them would be very inefficient at best, and is plain wrong if bitfields are involved. */ { tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1)); /* Vector comparisons need no boolification. */ if (TREE_CODE (type) == VECTOR_TYPE) goto expr_2; else if (!AGGREGATE_TYPE_P (type)) { tree org_type = TREE_TYPE (*expr_p); *expr_p = gimple_boolify (*expr_p); if (!useless_type_conversion_p (org_type, TREE_TYPE (*expr_p))) { *expr_p = fold_convert_loc (input_location, org_type, *expr_p); ret = GS_OK; } else goto expr_2; } else if (TYPE_MODE (type) != BLKmode) ret = gimplify_scalar_mode_aggregate_compare (expr_p); else ret = gimplify_variable_sized_compare (expr_p); break; } /* If *EXPR_P does not need to be special-cased, handle it according to its class. */ case tcc_unary: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); break; case tcc_binary: expr_2: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); break; } expr_3: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (MIN (r0, r1), r2); break; } case tcc_declaration: case tcc_constant: ret = GS_ALL_DONE; goto dont_recalculate; default: gcc_unreachable (); } recalculate_side_effects (*expr_p); dont_recalculate: break; } gcc_assert (*expr_p || ret != GS_OK); } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. 
*/ gcc_assert (ret != GS_UNHANDLED); if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. */ if (!TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. */ enum tree_code code = TREE_CODE (*expr_p); switch (code) { case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); break; case ARRAY_REF: case ARRAY_RANGE_REF: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); break; default: /* Anything else with side-effects must be converted to a valid statement before we get here. */ gcc_unreachable (); } *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p)) && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode) { /* Historically, the compiler has treated a bare reference to a non-BLKmode volatile lvalue as forcing a load. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p)); /* Normally, we do not want to create a temporary for a TREE_ADDRESSABLE type because such a type should not be copied by bitwise-assignment. However, we make an exception here, as all we are doing here is ensuring that we read the bytes that make up the type. We use create_tmp_var_raw because create_tmp_var will abort when given a TREE_ADDRESSABLE type. */ tree tmp = create_tmp_var_raw (type, "vol"); gimple_add_tmp_var (tmp); gimplify_assign (tmp, *expr_p, pre_p); *expr_p = NULL; } else /* We can't do anything useful with a volatile reference to an incomplete type, so just throw it away. Likewise for a BLKmode type, since any implicit inner load should already have been turned into an explicit one by the gimplification process. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and return. */ if (fallback == fb_none || is_statement) { /* Since *EXPR_P has been converted into a GIMPLE tuple, clear it out for GC to reclaim it. */ *expr_p = NULL_TREE; if (!gimple_seq_empty_p (internal_pre) || !gimple_seq_empty_p (internal_post)) { gimplify_seq_add_seq (&internal_pre, internal_post); gimplify_seq_add_seq (pre_p, internal_pre); } /* The result of gimplifying *EXPR_P is going to be the last few statements in *PRE_P and *POST_P. Add location information to all the statements that were added by the gimplification helpers. */ if (!gimple_seq_empty_p (*pre_p)) annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location); if (!gimple_seq_empty_p (*post_p)) annotate_all_with_location_after (*post_p, post_last_gsi, input_location); goto out; } #if 1 // ENABLE_GIMPLE_CHECKING if (*expr_p) { enum tree_code code = TREE_CODE (*expr_p); /* These expressions should already be in gimple IR form. 
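If one of the statement codes listed in the assertion survives to this point, some case in the big switch above failed to lower it.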
*/ gcc_assert (code != MODIFY_EXPR && code != ASM_EXPR && code != BIND_EXPR && code != CATCH_EXPR && (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr) && code != EH_FILTER_EXPR && code != GOTO_EXPR && code != LABEL_EXPR && code != LOOP_EXPR && code != SWITCH_EXPR && code != TRY_FINALLY_EXPR && code != OMP_CRITICAL && code != OMP_FOR && code != OMP_MASTER && code != OMP_ORDERED && code != OMP_PARALLEL && code != OMP_SECTIONS && code != OMP_SECTION && code != OMP_SINGLE); } #endif /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. If it's a valid operand that matches GIMPLE_TEST_F, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temporary before adding the post-effects to POST_P. */ if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && gimple_seq_empty_p (internal_post) && is_gimple_addressable (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. */ tmp = build_fold_addr_expr_loc (input_location, *expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); *expr_p = build_simple_mem_ref (tmp); } else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p)) { /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. First, make sure that the expression has a type so that it can be assigned into a temporary. */ gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p))); if (!gimple_seq_empty_p (internal_post) || (fallback & fb_lvalue)) /* The postqueue might change the value of the expression between the initialization and use of the temporary, so we can't use a formal temp. FIXME do we care? */ { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); if (TREE_CODE (TREE_TYPE (*expr_p)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (*expr_p)) == VECTOR_TYPE) DECL_GIMPLE_REG_P (*expr_p) = 1; } else *expr_p = get_formal_tmp_var (*expr_p, pre_p); } else { #ifdef ENABLE_GIMPLE_CHECKING if (!(fallback & fb_mayfail)) { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p, 0); debug_tree (*expr_p); internal_error ("gimplification failed"); } #endif gcc_assert (fallback & fb_mayfail); /* If this is an asm statement, and the user asked for the impossible, don't die. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } /* Make sure the temporary matches our predicate. */ gcc_assert ((*gimple_test_f) (*expr_p)); if (!gimple_seq_empty_p (internal_post)) { annotate_all_with_location (internal_post, input_location); gimplify_seq_add_seq (pre_p, internal_post); } out: input_location = saved_location; return ret; } /* Look through TYPE for variable-sized objects and gimplify each such size that we find. Add to LIST_P any statements generated. */ void gimplify_type_sizes (tree type, gimple_seq *list_p) { tree field, t; if (type == NULL || type == error_mark_node) return; /* We first do the main variant, then copy into any other variants. */ type = TYPE_MAIN_VARIANT (type); /* Avoid infinite recursion. 
*/ if (TYPE_SIZES_GIMPLIFIED (type)) return; TYPE_SIZES_GIMPLIFIED (type) = 1; switch (TREE_CODE (type)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p); gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type); TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type); } break; case ARRAY_TYPE: /* These types may not have declarations, so handle them here. */ gimplify_type_sizes (TREE_TYPE (type), list_p); gimplify_type_sizes (TYPE_DOMAIN (type), list_p); /* Ensure VLA bounds aren't removed, for -O0 they should be variables with assigned stack slots, for -O1+ -g they should be tracked by VTA. */ if (!(TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_IGNORED_P (TYPE_NAME (type))) && TYPE_DOMAIN (type) && INTEGRAL_TYPE_P (TYPE_DOMAIN (type))) { t = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t)) DECL_IGNORED_P (t) = 0; t = TYPE_MAX_VALUE (TYPE_DOMAIN (type)); if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t)) DECL_IGNORED_P (t) = 0; } break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p); gimplify_one_sizepos (&DECL_SIZE (field), list_p); gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p); gimplify_type_sizes (TREE_TYPE (field), list_p); } break; case POINTER_TYPE: case REFERENCE_TYPE: /* We used to recurse on the pointed-to type here, which turned out to be incorrect because its definition might refer to variables not yet initialized at this point if a forward declaration is involved. It was actually useful for anonymous pointed-to types to ensure that the sizes evaluation dominates every possible later use of the values. Restricting to such types here would be safe since there is no possible forward declaration around, but would introduce an undesirable middle-end semantic to anonymity. We then defer to front-ends the responsibility of ensuring that the sizes are evaluated both early and late enough, e.g. by attaching artificial type declarations to the tree. */ break; default: break; } gimplify_one_sizepos (&TYPE_SIZE (type), list_p); gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_SIZE (t) = TYPE_SIZE (type); TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type); TYPE_SIZES_GIMPLIFIED (t) = 1; } } /* A subroutine of gimplify_type_sizes to make sure that *EXPR_P, a size or position, has had all of its SAVE_EXPRs evaluated. We add any required statements to *STMT_P. */ void gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p) { tree type, expr = *expr_p; /* We don't do anything if the value isn't there, is constant, or contains A PLACEHOLDER_EXPR. We also don't want to do anything if it's already a VAR_DECL. If it's a VAR_DECL from another function, the gimplifier will want to replace it with a new variable, but that will cause problems if this type is from outside the function. It's OK to have that here. 
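For example, for a VLA 'char buf[n + 1]' the size expressions involve 'n + 1'; each such expression is unshared and gimplified below so that the size ends up in a temporary the middle end can reuse.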
*/ if (expr == NULL_TREE || TREE_CONSTANT (expr) || TREE_CODE (expr) == VAR_DECL || CONTAINS_PLACEHOLDER_P (expr)) return; type = TREE_TYPE (expr); *expr_p = unshare_expr (expr); gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue); expr = *expr_p; /* Verify that we've an exact type match with the original expression. In particular, we do not wish to drop a "sizetype" in favour of a type of similar dimensions. We don't want to pollute the generic type-stripping code with this knowledge because it doesn't matter for the bulk of GENERIC/GIMPLE. It only matters that TYPE_SIZE_UNIT and friends retain their "sizetype-ness". */ if (TREE_TYPE (expr) != type && TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)) { tree tmp; gimple stmt; *expr_p = create_tmp_var (type, NULL); tmp = build1 (NOP_EXPR, type, expr); stmt = gimplify_assign (*expr_p, tmp, stmt_p); gimple_set_location (stmt, EXPR_LOC_OR_HERE (expr)); } } /* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node containing the sequence of corresponding GIMPLE statements. If DO_PARMS is true, also gimplify the parameters. */ gimple gimplify_body (tree fndecl, bool do_parms) { location_t saved_location = input_location; gimple_seq parm_stmts, seq; gimple outer_bind; struct gimplify_ctx gctx; struct cgraph_node *cgn; timevar_push (TV_TREE_GIMPLIFY); /* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during gimplification. */ default_rtl_profile (); gcc_assert (gimplify_ctxp == NULL); push_gimplify_context (&gctx); /* Unshare most shared trees in the body and in that of any nested functions. It would seem we don't have to do this for nested functions because they are supposed to be output and then the outer function gimplified first, but the g++ front end doesn't always do it that way. */ unshare_body (fndecl); unvisit_body (fndecl); cgn = cgraph_get_node (fndecl); if (cgn && cgn->origin) nonlocal_vlas = pointer_set_create (); /* Make sure input_location isn't set to something weird. */ input_location = DECL_SOURCE_LOCATION (fndecl); /* Resolve callee-copies. This has to be done before processing the body so that DECL_VALUE_EXPR gets processed correctly. */ parm_stmts = do_parms ? gimplify_parameters () : NULL; /* Gimplify the function's body. */ seq = NULL; gimplify_stmt (&DECL_SAVED_TREE (fndecl), &seq); outer_bind = gimple_seq_first_stmt (seq); if (!outer_bind) { outer_bind = gimple_build_nop (); gimplify_seq_add_stmt (&seq, outer_bind); } /* The body must contain exactly one statement, a GIMPLE_BIND. If this is not the case, wrap everything in a GIMPLE_BIND to make it so. */ if (gimple_code (outer_bind) == GIMPLE_BIND && gimple_seq_first (seq) == gimple_seq_last (seq)) ; else outer_bind = gimple_build_bind (NULL_TREE, seq, NULL); DECL_SAVED_TREE (fndecl) = NULL_TREE; /* If we had callee-copies statements, insert them at the beginning of the function and clear DECL_VALUE_EXPR_P on the parameters. 
*/ if (!gimple_seq_empty_p (parm_stmts)) { tree parm; gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind)); gimple_bind_set_body (outer_bind, parm_stmts); for (parm = DECL_ARGUMENTS (current_function_decl); parm; parm = DECL_CHAIN (parm)) if (DECL_HAS_VALUE_EXPR_P (parm)) { DECL_HAS_VALUE_EXPR_P (parm) = 0; DECL_IGNORED_P (parm) = 0; } } if (nonlocal_vlas) { pointer_set_destroy (nonlocal_vlas); nonlocal_vlas = NULL; } pop_gimplify_context (outer_bind); gcc_assert (gimplify_ctxp == NULL); if (!seen_error ()) verify_gimple_in_seq (gimple_bind_body (outer_bind)); timevar_pop (TV_TREE_GIMPLIFY); input_location = saved_location; return outer_bind; } typedef char *char_p; /* For DEF_VEC_P. */ DEF_VEC_P(char_p); DEF_VEC_ALLOC_P(char_p,heap); /* Return whether we should exclude FNDECL from instrumentation. */ static bool flag_instrument_functions_exclude_p (tree fndecl) { VEC(char_p,heap) *vec; vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_functions; if (VEC_length (char_p, vec) > 0) { const char *name; int i; char *s; name = lang_hooks.decl_printable_name (fndecl, 0); FOR_EACH_VEC_ELT (char_p, vec, i, s) if (strstr (name, s) != NULL) return true; } vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_files; if (VEC_length (char_p, vec) > 0) { const char *name; int i; char *s; name = DECL_SOURCE_FILE (fndecl); FOR_EACH_VEC_ELT (char_p, vec, i, s) if (strstr (name, s) != NULL) return true; } return false; } /* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL node for the function we want to gimplify. Return the sequence of GIMPLE statements corresponding to the body of FNDECL. */ void gimplify_function_tree (tree fndecl) { tree oldfn, parm, ret; gimple_seq seq; gimple bind; gcc_assert (!gimple_body (fndecl)); oldfn = current_function_decl; current_function_decl = fndecl; if (DECL_STRUCT_FUNCTION (fndecl)) push_cfun (DECL_STRUCT_FUNCTION (fndecl)); else push_struct_function (fndecl); for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = DECL_CHAIN (parm)) { /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. We'll transform their uses as we find them. */ if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE) && !TREE_THIS_VOLATILE (parm) && !needs_to_live_in_memory (parm)) DECL_GIMPLE_REG_P (parm) = 1; } ret = DECL_RESULT (fndecl); if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE) && !needs_to_live_in_memory (ret)) DECL_GIMPLE_REG_P (ret) = 1; bind = gimplify_body (fndecl, true); /* The tree body of the function is no longer needed, replace it with the new GIMPLE body. */ seq = gimple_seq_alloc (); gimple_seq_add_stmt (&seq, bind); gimple_set_body (fndecl, seq); /* If we're instrumenting function entry/exit, then prepend the call to the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to catch the exit hook. */ /* ??? Add some way to ignore exceptions for this TFE. 
*/ if (flag_instrument_function_entry_exit && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl) && !flag_instrument_functions_exclude_p (fndecl)) { tree x; gimple new_bind; gimple tf; gimple_seq cleanup = NULL, body = NULL; tree tmp_var; gimple call; x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS); call = gimple_build_call (x, 1, integer_zero_node); tmp_var = create_tmp_var (ptr_type_node, "return_addr"); gimple_call_set_lhs (call, tmp_var); gimplify_seq_add_stmt (&cleanup, call); x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_EXIT); call = gimple_build_call (x, 2, build_fold_addr_expr (current_function_decl), tmp_var); gimplify_seq_add_stmt (&cleanup, call); tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY); x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS); call = gimple_build_call (x, 1, integer_zero_node); tmp_var = create_tmp_var (ptr_type_node, "return_addr"); gimple_call_set_lhs (call, tmp_var); gimplify_seq_add_stmt (&body, call); x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_ENTER); call = gimple_build_call (x, 2, build_fold_addr_expr (current_function_decl), tmp_var); gimplify_seq_add_stmt (&body, call); gimplify_seq_add_stmt (&body, tf); new_bind = gimple_build_bind (NULL, body, gimple_bind_block (bind)); /* Clear the block for BIND, since it is no longer directly inside the function, but within a try block. */ gimple_bind_set_block (bind, NULL); /* Replace the current function body with the body wrapped in the try/finally TF. */ seq = gimple_seq_alloc (); gimple_seq_add_stmt (&seq, new_bind); gimple_set_body (fndecl, seq); } DECL_SAVED_TREE (fndecl) = NULL_TREE; cfun->curr_properties = PROP_gimple_any; current_function_decl = oldfn; pop_cfun (); } /* Some transformations like inlining may invalidate the GIMPLE form for operands. This function traverses all the operands in STMT and gimplifies anything that is not a valid gimple operand. Any new GIMPLE statements are inserted before *GSI_P. */ void gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p) { size_t i, num_ops; tree orig_lhs = NULL_TREE, lhs, t; gimple_seq pre = NULL; gimple post_stmt = NULL; struct gimplify_ctx gctx; push_gimplify_context (&gctx); gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun); switch (gimple_code (stmt)) { case GIMPLE_COND: gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_SWITCH: gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_ASM: { size_t i, noutputs = gimple_asm_noutputs (stmt); const char *constraint, **oconstraints; bool allows_mem, allows_reg, is_inout; oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *)); for (i = 0; i < noutputs; i++) { tree op = gimple_asm_output_op (stmt, i); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op))); oconstraints[i] = constraint; parse_output_constraint (&constraint, i, 0, 0, &allows_mem, &allows_reg, &is_inout); gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_inout ? 
is_gimple_min_lval : is_gimple_lvalue, fb_lvalue | fb_mayfail); } for (i = 0; i < gimple_asm_ninputs (stmt); i++) { tree op = gimple_asm_input_op (stmt, i); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op))); parse_input_constraint (&constraint, 0, 0, noutputs, 0, oconstraints, &allows_mem, &allows_reg); if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem) allows_reg = 0; if (!allows_reg && allows_mem) gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_gimple_lvalue, fb_lvalue | fb_mayfail); else gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_gimple_asm_val, fb_rvalue); } } break; default: /* NOTE: We start gimplifying operands from last to first to make sure that side-effects on the RHS of calls, assignments and ASMs are executed before the LHS. The ordering is not important for other statements. */ num_ops = gimple_num_ops (stmt); orig_lhs = gimple_get_lhs (stmt); for (i = num_ops; i > 0; i--) { tree op = gimple_op (stmt, i - 1); if (op == NULL_TREE) continue; if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt))) gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue); else if (i == 2 && is_gimple_assign (stmt) && num_ops == 2 && get_gimple_rhs_class (gimple_expr_code (stmt)) == GIMPLE_SINGLE_RHS) gimplify_expr (&op, &pre, NULL, rhs_predicate_for (gimple_assign_lhs (stmt)), fb_rvalue); else if (i == 2 && is_gimple_call (stmt)) { if (TREE_CODE (op) == FUNCTION_DECL) continue; gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue); } else gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue); gimple_set_op (stmt, i - 1, op); } lhs = gimple_get_lhs (stmt); /* If the LHS changed it in a way that requires a simple RHS, create temporary. */ if (lhs && !is_gimple_reg (lhs)) { bool need_temp = false; if (is_gimple_assign (stmt) && num_ops == 2 && get_gimple_rhs_class (gimple_expr_code (stmt)) == GIMPLE_SINGLE_RHS) gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL, rhs_predicate_for (gimple_assign_lhs (stmt)), fb_rvalue); else if (is_gimple_reg (lhs)) { if (is_gimple_reg_type (TREE_TYPE (lhs))) { if (is_gimple_call (stmt)) { i = gimple_call_flags (stmt); if ((i & ECF_LOOPING_CONST_OR_PURE) || !(i & (ECF_CONST | ECF_PURE))) need_temp = true; } if (stmt_can_throw_internal (stmt)) need_temp = true; } } else { if (is_gimple_reg_type (TREE_TYPE (lhs))) need_temp = true; else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode) { if (is_gimple_call (stmt)) { tree fndecl = gimple_call_fndecl (stmt); if (!aggregate_value_p (TREE_TYPE (lhs), fndecl) && !(fndecl && DECL_RESULT (fndecl) && DECL_BY_REFERENCE (DECL_RESULT (fndecl)))) need_temp = true; } else need_temp = true; } } if (need_temp) { tree temp = create_tmp_reg (TREE_TYPE (lhs), NULL); if (TREE_CODE (orig_lhs) == SSA_NAME) orig_lhs = SSA_NAME_VAR (orig_lhs); if (gimple_in_ssa_p (cfun)) temp = make_ssa_name (temp, NULL); gimple_set_lhs (stmt, temp); post_stmt = gimple_build_assign (lhs, temp); if (TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = post_stmt; } } break; } if (gimple_referenced_vars (cfun)) for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t)) add_referenced_var (t); if (!gimple_seq_empty_p (pre)) { if (gimple_in_ssa_p (cfun)) { gimple_stmt_iterator i; for (i = gsi_start (pre); !gsi_end_p (i); gsi_next (&i)) mark_symbols_for_renaming (gsi_stmt (i)); } gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT); } if (post_stmt) gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT); pop_gimplify_context (NULL); } /* Expand EXPR to list of gimple statements STMTS. 
GIMPLE_TEST_F specifies the predicate that will hold for the result. If VAR is not NULL, make the base variable of the final destination be VAR if suitable. */ tree force_gimple_operand_1 (tree expr, gimple_seq *stmts, gimple_predicate gimple_test_f, tree var) { tree t; enum gimplify_status ret; struct gimplify_ctx gctx; *stmts = NULL; /* gimple_test_f might be more strict than is_gimple_val, make sure we pass both. Just checking gimple_test_f doesn't work because most gimple predicates do not work recursively. */ if (is_gimple_val (expr) && (*gimple_test_f) (expr)) return expr; push_gimplify_context (&gctx); gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun); gimplify_ctxp->allow_rhs_cond_expr = true; if (var) expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr); if (TREE_CODE (expr) != MODIFY_EXPR && TREE_TYPE (expr) == void_type_node) { gimplify_and_add (expr, stmts); expr = NULL_TREE; } else { ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue); gcc_assert (ret != GS_ERROR); } if (gimple_referenced_vars (cfun)) for (t = gimplify_ctxp->temps; t ; t = DECL_CHAIN (t)) add_referenced_var (t); pop_gimplify_context (NULL); return expr; } /* Expand EXPR to list of gimple statements STMTS. If SIMPLE is true, force the result to be either ssa_name or an invariant, otherwise just force it to be a rhs expression. If VAR is not NULL, make the base variable of the final destination be VAR if suitable. */ tree force_gimple_operand (tree expr, gimple_seq *stmts, bool simple, tree var) { return force_gimple_operand_1 (expr, stmts, simple ? is_gimple_val : is_gimple_reg_rhs, var); } /* Invoke force_gimple_operand_1 for EXPR with parameters GIMPLE_TEST_F and VAR. If some statements are produced, emits them at GSI. If BEFORE is true. the statements are appended before GSI, otherwise they are appended after it. M specifies the way GSI moves after insertion (GSI_SAME_STMT or GSI_CONTINUE_LINKING are the usual values). */ tree force_gimple_operand_gsi_1 (gimple_stmt_iterator *gsi, tree expr, gimple_predicate gimple_test_f, tree var, bool before, enum gsi_iterator_update m) { gimple_seq stmts; expr = force_gimple_operand_1 (expr, &stmts, gimple_test_f, var); if (!gimple_seq_empty_p (stmts)) { if (gimple_in_ssa_p (cfun)) { gimple_stmt_iterator i; for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i)) mark_symbols_for_renaming (gsi_stmt (i)); } if (before) gsi_insert_seq_before (gsi, stmts, m); else gsi_insert_seq_after (gsi, stmts, m); } return expr; } /* Invoke force_gimple_operand_1 for EXPR with parameter VAR. If SIMPLE is true, force the result to be either ssa_name or an invariant, otherwise just force it to be a rhs expression. If some statements are produced, emits them at GSI. If BEFORE is true, the statements are appended before GSI, otherwise they are appended after it. M specifies the way GSI moves after insertion (GSI_SAME_STMT or GSI_CONTINUE_LINKING are the usual values). */ tree force_gimple_operand_gsi (gimple_stmt_iterator *gsi, tree expr, bool simple_p, tree var, bool before, enum gsi_iterator_update m) { return force_gimple_operand_gsi_1 (gsi, expr, simple_p ? is_gimple_val : is_gimple_reg_rhs, var, before, m); } #include "gt-gimplify.h"
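/* Usage note (an illustrative pattern, not code from this file): a pass that
   needs EXPR as a valid gimple value at statement iterator GSI typically
   writes

       expr = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE,
                                        true, GSI_SAME_STMT);

   which gimplifies EXPR, inserts any statements needed to compute it before
   GSI, and returns the simplified replacement (an SSA name or invariant).  */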
debug_tools.c
#include <assert.h>
#include <stdlib.h>  /* malloc/calloc/free used by the wrappers below */

#include "debug_tools.h"
#include "sputil.h"

/* If this has a positive value k, then it is guaranteed that the k-th call to
 * malloc results in NULL. If the value is non-positive, no such pretended OOM
 * condition will be triggered. */
static long GLOBAL_failcounter = 0;

void FGNSRdbg_setmallocfail(long next_fail)
{
    assert(next_fail >= 0);
#pragma omp critical (MALLOCDEBUG)
    {
        GLOBAL_failcounter = next_fail;
    }
}

void * FGNSRdbg_malloc(size_t s)
{
    void *retval;
#pragma omp critical (MALLOCDEBUG)
    {
        if ( (GLOBAL_failcounter > 0) && (--GLOBAL_failcounter == 0) )
            retval = NULL;
        else
            retval = malloc(s);
    }
    return retval;
}

void * FGNSRdbg_calloc(size_t c, size_t s)
{
    void *retval;
#pragma omp critical (MALLOCDEBUG)
    {
        if ( (GLOBAL_failcounter > 0) && (--GLOBAL_failcounter == 0) )
            retval = NULL;
        else
            retval = calloc(c, s);
    }
    return retval;
}

void FGNSRdbg_free(void * p)
{
    free(p);
}
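/* Usage sketch (an illustration, not part of the library): compile with
 * -DFGNSR_DEBUG_TOOLS_DEMO to simulate an out-of-memory failure on the
 * third allocation. The macro name and the demo itself are assumptions. */
#ifdef FGNSR_DEBUG_TOOLS_DEMO
#include <stdio.h>

int main(void)
{
    void *a, *b, *c;

    FGNSRdbg_setmallocfail(3);   /* the third wrapped malloc returns NULL */
    a = FGNSRdbg_malloc(16);     /* call 1: succeeds */
    b = FGNSRdbg_malloc(16);     /* call 2: succeeds */
    c = FGNSRdbg_malloc(16);     /* call 3: pretended OOM, returns NULL */
    printf("a=%p b=%p c=%p\n", a, b, c);
    FGNSRdbg_free(a);
    FGNSRdbg_free(b);
    return (c == NULL) ? 0 : 1;  /* 0 when the simulated OOM fired */
}
#endif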
tmo.h
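/* MSG() is used for progress/warning output throughout this file but is not
   defined in it (it presumably comes from the surrounding project). A minimal
   fallback is defined here under that assumption; swap in the project's own
   logging macro if one exists. */
#ifndef MSG
#include <stdio.h>
#define MSG(...) fprintf(stderr, __VA_ARGS__)
#endif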
//#ifndef __TMO_H__
//#define __TMO_H__
//#include "tmo.h"
//#include <omp.h>

#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

using namespace std;

/* The makehdr* helpers below use cv::Mat, cv::Scalar and cv::mean, so the
   OpenCV headers must actually be included; the OpenCV 2 umbrella header is
   assumed here in place of the old <cv.h>/<cxcore.h>/<highgui.h> trio. */
#include <opencv2/opencv.hpp>
using namespace cv;

void makehdr3(Mat* im1, Mat* im2, Mat* im3, Mat* hdr) {
    Scalar s1 = mean(*im1); // dark
    Scalar s2 = mean(*im2); // med
    Scalar s3 = mean(*im3); // light
    /* cv::Scalar holds the per-channel means in elements 0..2; element 3 is
       the fourth channel and is 0 for 3-channel images, so averaging
       elements 1..3 would drop the first channel. */
    float mean1 = (s1[0] + s1[1] + s1[2]) / 3;
    float mean2 = (s2[0] + s2[1] + s2[2]) / 3;
    float mean3 = (s3[0] + s3[1] + s3[2]) / 3;
    float r1 = mean1 / mean3;
    float r2 = 1;
    float r3 = mean3 / mean1;
    (void)mean2; // computed for symmetry; the medium exposure keeps weight 1
    // weighted add of the three exposures
    *hdr = (*im1 / r1 + *im2 / r2 + *im3 / r3);
}

void makehdr2(Mat* im1, Mat* im3, Mat* hdr) {
    Scalar s1 = mean(*im1); // dark
    Scalar s3 = mean(*im3); // light
    float mean1 = (s1[0] + s1[1] + s1[2]) / 3;
    float mean3 = (s3[0] + s3[1] + s3[2]) / 3;
    float r1 = mean1 / mean3;
    float r3 = mean3 / mean1;
    // weighted add of the two exposures
    *hdr = (*im1 / r1 + *im3 / r3);
}

void makehdr3log(Mat* im1, Mat* im2, Mat* im3, Mat* hdr) {
    (*im1).convertTo(*im1, CV_32FC3);
    (*im2).convertTo(*im2, CV_32FC3);
    (*im3).convertTo(*im3, CV_32FC3);
    *im1 += .01;
    *im2 += .01;
    *im3 += .01;
    Mat temp1((*im1).reshape(1, hdr->rows));
    Mat temp2((*im2).reshape(1, hdr->rows));
    Mat temp3((*im3).reshape(1, hdr->rows));
    cv::log(temp1, temp1);
    cv::log(temp2, temp2);
    cv::log(temp3, temp3);
    temp1 = temp1.reshape(3, hdr->rows);
    temp2 = temp2.reshape(3, hdr->rows);
    temp3 = temp3.reshape(3, hdr->rows);
    *hdr = (temp1 + temp2 + temp3) / 3;
}

void makehdr2log(Mat* im1, Mat* im3, Mat* hdr) {
    (*im1).convertTo(*im1, CV_32FC3);
    (*im3).convertTo(*im3, CV_32FC3);
    *im1 += .01;
    *im3 += .01;
    Mat temp1((*im1).reshape(1, hdr->rows));
    Mat temp3((*im3).reshape(1, hdr->rows));
    cv::log(temp1, temp1);
    cv::log(temp3, temp3);
    temp1 = temp1.reshape(3, hdr->rows);
    temp3 = temp3.reshape(3, hdr->rows);
    *hdr = (temp1 + temp3) / 2;
}

/**
 * Modified by Chris McClanahan for Android JNI
 *
 * @brief Contrast mapping TMO
 *
 * From:
 *
 * Rafal Mantiuk, Karol Myszkowski, Hans-Peter Seidel.
 * A Perceptual Framework for Contrast Processing of High Dynamic Range Images
 * In: ACM Transactions on Applied Perception 3 (3), pp. 286-308, 2006
 * http://www.mpi-inf.mpg.de/~mantiuk/contrast_domain/
 *
 * This file is a part of LuminanceHDR package, based on pfstmo.
 * ----------------------------------------------------------------------
 * Copyright (C) 2007 Grzegorz Krawczyk
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * ----------------------------------------------------------------------
 *
 * @author Radoslaw Mantiuk, <radoslaw.mantiuk@gmail.com>
 * @author Rafal Mantiuk, <mantiuk@gmail.com>
 * Updated 2007/12/17 by Ed Brambley <E.J.Brambley@damtp.cam.ac.uk>
 * (more information on the changes:
 * http://www.damtp.cam.ac.uk/user/ejb48/hdr/index.html)
 * Updated 2008/06/25 by Ed Brambley <E.J.Brambley@damtp.cam.ac.uk>
 * bug fixes and openMP patches
 * more on this:
 * http://groups.google.com/group/pfstools/browse_thread/thread/de2378af98ec6185/0dee5304fc14e99d?hl=en#0dee5304fc14e99d
 * Optimization improvements by Lebed Dmytry
 * Updated 2008/07/26 by Dejan Beric <dejan.beric@live.com>
 * Added the detail factor slider which offers more control over contrast in details
 * Update 2010/10/06 by Axel Voitier <axel.voitier@gmail.com>
 * detail_factor patch in order to remove potential issues in a multithreading environment
 * @author Davide Anastasia <davideanastasia@users.sourceforge.net>
 * Improvement & Clean up
 * @author Bruce Guenter <bruce@untroubled.org>
 * Added trivial downsample and upsample functions when both dimensions are even
 *
 * $Id: contrast_domain.cpp,v 1.14 2008/08/26 17:08:49 rafm Exp $
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

typedef struct pyramid_s {
    int rows;
    int cols;
    float* Gx;
    float* Gy;
    struct pyramid_s* next;
    struct pyramid_s* prev;
} pyramid_t;

#define PYRAMID_MIN_PIXELS 3
#define LOOKUP_W_TO_R 107

//#define DEBUG_MANTIUK06

void contrast_equalization(pyramid_t* pp, const float contrastFactor);
void transform_to_luminance(pyramid_t* pp, float* x, const bool bcg, const int itmax, const float tol);
void matrix_add(const int n, const float* const a, float* const b);
void matrix_subtract(const int n, const float* const a, float* const b);
void matrix_copy(const int n, const float* const a, float* const b);
void matrix_multiply_const(const int n, float* const a, const float val);
void matrix_divide(const int n, const float* a, float* b);
float* matrix_alloc(const int size);
void matrix_free(float* m);
float matrix_DotProduct(const int n, const float* const a, const float* const b);
void matrix_zero(const int n, float* const m);
void calculate_and_add_divergence(const int cols, const int rows, const float* const Gx, const float* Gy, float* divG);
void pyramid_calculate_divergence(pyramid_t* pyramid);
void pyramid_calculate_divergence_sum(pyramid_t* pyramid, float* divG_sum);
void calculate_scale_factor(const int n, const float* const G, float* const C);
void pyramid_calculate_scale_factor(pyramid_t* pyramid, pyramid_t* pC);
void scale_gradient(const int n, float* G, const float* C);
void pyramid_scale_gradient(pyramid_t* pyramid, pyramid_t* pC);
void pyramid_free(pyramid_t* pyramid);
pyramid_t* pyramid_allocate(const int cols, const int rows);
void calculate_gradient(const int cols, const int rows, const float* const lum, float* const Gx, float* const Gy);
void pyramid_calculate_gradient(const pyramid_t* pyramid, const float* lum);
void solveX(const int n, const float* const b, float* const x);
void multiplyA(pyramid_t* px, pyramid_t* pyramid, const float* const x, float* const divG_sum);
void linbcg(pyramid_t* pyramid, pyramid_t* pC, float* b, float* x, const int itmax, const float tol);
void lincg(pyramid_t* pyramid, pyramid_t* pC, const float* const b, float*
const x, const int itmax, const float tol); float lookup_table(const int n, const float* const in_tab, const float* const out_tab, const float val); void transform_to_R(const int n, float* const G, float detail_factor); void pyramid_transform_to_R(pyramid_t* pyramid, float detail_factor); void transform_to_G(const int n, float* const R, float detail_factor); void pyramid_transform_to_G(pyramid_t* pyramid, float detail_factor); void pyramid_gradient_multiply(pyramid_t* pyramid, const float val); void swap_pointers(float* &pOne, float* &pTwo); // utility function void dump_matrix_to_file(const int width, const int height, const float* const m, const char* const file_name); void matrix_show(const char* const text, int rows, int cols, const float* const data); void pyramid_show(pyramid_t* pyramid); static float W_table[] = {0.000000f, 0.010000f, 0.021180f, 0.031830f, 0.042628f, 0.053819f, 0.065556f, 0.077960f, 0.091140f, 0.105203f, 0.120255f, 0.136410f, 0.153788f, 0.172518f, 0.192739f, 0.214605f, 0.238282f, 0.263952f, 0.291817f, 0.322099f, 0.355040f, 0.390911f, 0.430009f, 0.472663f, 0.519238f, 0.570138f, 0.625811f, 0.686754f, 0.753519f, 0.826720f, 0.907041f, 0.995242f, 1.092169f, 1.198767f, 1.316090f, 1.445315f, 1.587756f, 1.744884f, 1.918345f, 2.109983f, 2.321863f, 2.556306f, 2.815914f, 3.103613f, 3.422694f, 3.776862f, 4.170291f, 4.607686f, 5.094361f, 5.636316f, 6.240338f, 6.914106f, 7.666321f, 8.506849f, 9.446889f, 10.499164f, 11.678143f, 13.000302f, 14.484414f, 16.151900f, 18.027221f, 20.138345f, 22.517282f, 25.200713f, 28.230715f, 31.655611f, 35.530967f, 39.920749f, 44.898685f, 50.549857f, 56.972578f, 64.280589f, 72.605654f, 82.100619f, 92.943020f, 105.339358f, 119.530154f, 135.795960f, 154.464484f, 175.919088f, 200.608905f, 229.060934f, 261.894494f, 299.838552f, 343.752526f, 394.651294f, 453.735325f, 522.427053f, 602.414859f, 695.706358f, 804.693100f, 932.229271f, 1081.727632f, 1257.276717f, 1463.784297f, 1707.153398f, 1994.498731f, 2334.413424f, 2737.298517f, 3215.770944f, 3785.169959f, 4464.187290f, 5275.653272f, 6247.520102f, 7414.094945f, 8817.590551f, 10510.080619f}; static float R_table[] = {0.000000f, 0.009434f, 0.018868f, 0.028302f, 0.037736f, 0.047170f, 0.056604f, 0.066038f, 0.075472f, 0.084906f, 0.094340f, 0.103774f, 0.113208f, 0.122642f, 0.132075f, 0.141509f, 0.150943f, 0.160377f, 0.169811f, 0.179245f, 0.188679f, 0.198113f, 0.207547f, 0.216981f, 0.226415f, 0.235849f, 0.245283f, 0.254717f, 0.264151f, 0.273585f, 0.283019f, 0.292453f, 0.301887f, 0.311321f, 0.320755f, 0.330189f, 0.339623f, 0.349057f, 0.358491f, 0.367925f, 0.377358f, 0.386792f, 0.396226f, 0.405660f, 0.415094f, 0.424528f, 0.433962f, 0.443396f, 0.452830f, 0.462264f, 0.471698f, 0.481132f, 0.490566f, 0.500000f, 0.509434f, 0.518868f, 0.528302f, 0.537736f, 0.547170f, 0.556604f, 0.566038f, 0.575472f, 0.584906f, 0.594340f, 0.603774f, 0.613208f, 0.622642f, 0.632075f, 0.641509f, 0.650943f, 0.660377f, 0.669811f, 0.679245f, 0.688679f, 0.698113f, 0.707547f, 0.716981f, 0.726415f, 0.735849f, 0.745283f, 0.754717f, 0.764151f, 0.773585f, 0.783019f, 0.792453f, 0.801887f, 0.811321f, 0.820755f, 0.830189f, 0.839623f, 0.849057f, 0.858491f, 0.867925f, 0.877358f, 0.886792f, 0.896226f, 0.905660f, 0.915094f, 0.924528f, 0.933962f, 0.943396f, 0.952830f, 0.962264f, 0.971698f, 0.981132f, 0.990566f, 1.000000f}; inline int imin(int a, int b) { return a < b ? a : b; } inline float max(float a, float b) { return a > b ? a : b; } inline float min(float a, float b) { return a < b ? 
a : b; }

// upsample the matrix
// the upsampled matrix is twice as big in each direction as data[]
// res should be a pointer to allocated memory for the bigger matrix
// cols and rows are the dimensions of the output matrix
void matrix_upsample_full(const int outCols, const int outRows, const float* const in, float* const out) {
    const int inRows = outRows / 2;
    const int inCols = outCols / 2;

    // Transpose of experimental downsampling matrix (theoretically the correct thing to do)
    const float dx = (float)inCols / ((float)outCols);
    const float dy = (float)inRows / ((float)outRows);
    const float factor = 1.0f / (dx * dy); // This gives a genuine upsampling matrix, not the transpose of the downsampling matrix
    // const float factor = 1.0f; // Theoretically, this should be the best.

    //#pragma omp parallel for schedule(static)
    for (int y = 0; y < outRows; y++) {
        const float sy = y * dy;
        const int iy1 = (y * inRows) / outRows;
        const int iy2 = imin(((y + 1) * inRows) / outRows, inRows - 1);

        for (int x = 0; x < outCols; x++) {
            const float sx = x * dx;
            const int ix1 = (x * inCols) / outCols;
            const int ix2 = imin(((x + 1) * inCols) / outCols, inCols - 1);

            // bilinear weights: the x-terms use dx and the y-terms use dy
            // (the last term's y-weight mistakenly used dx before)
            out[x + y * outCols] = (((ix1 + 1) - sx) * ((iy1 + 1 - sy)) * in[ix1 + iy1 * inCols] +
                                    ((ix1 + 1) - sx) * (sy + dy - (iy1 + 1)) * in[ix1 + iy2 * inCols] +
                                    (sx + dx - (ix1 + 1)) * ((iy1 + 1 - sy)) * in[ix2 + iy1 * inCols] +
                                    (sx + dx - (ix1 + 1)) * (sy + dy - (iy1 + 1)) * in[ix2 + iy2 * inCols]) * factor;
        }
    }
}

void matrix_upsample_simple(const int outCols, const int outRows, const float* const in, float* const out) {
    //#pragma omp parallel for schedule(static)
    for (int y = 0; y < outRows; y++) {
        const int iy1 = y / 2;
        float* outp = out + y * outCols;
        const float* inp = in + iy1 * (outCols / 2);
        for (int x = 0; x < outCols; x += 2) {
            const int ix1 = x / 2;
            outp[x] = outp[x + 1] = inp[ix1];
        }
    }
}

void matrix_upsample(const int outCols, const int outRows, const float* const in, float* const out) {
    if (outRows % 2 == 0 && outCols % 2 == 0) {
        matrix_upsample_simple(outCols, outRows, in, out);
    } else {
        matrix_upsample_full(outCols, outRows, in, out);
    }
}

// downsample the matrix
void matrix_downsample_full(const int inCols, const int inRows, const float* data, float* res) {
    const int outRows = inRows / 2;
    const int outCols = inCols / 2;
    const float dx = (float)inCols / ((float)outCols);
    const float dy = (float)inRows / ((float)outRows);

    // New downsampling by Ed Brambley:
    // Experimental downsampling that assumes pixels are square and
    // integrates over each new pixel to find the average value of the
    // underlying pixels.
    //
    // Consider the original pixels laid out, and the new (larger)
    // pixels laid out over the top of them. Then the new value for
    // the larger pixels is just the integral over that pixel of what
    // shows through; i.e., the values of the pixels underneath
    // multiplied by how much of that pixel is showing.
    //
    // (ix1, iy1) is the coordinate of the top left visible pixel.
    // (ix2, iy2) is the coordinate of the bottom right visible pixel.
    // (fx1, fy1) is the fraction of the top left pixel showing.
    // (fx2, fy2) is the fraction of the bottom right pixel showing.
    const float normalize = 1.0f / (dx * dy);

    //#pragma omp parallel for schedule(static)
    for (int y = 0; y < outRows; y++) {
        const int iy1 = (y * inRows) / outRows;
        const int iy2 = ((y + 1) * inRows) / outRows;
        const float fy1 = (iy1 + 1) - y * dy;
        const float fy2 = (y + 1) * dy - iy2;

        for (int x = 0; x < outCols; ++x) {
            const int ix1 = (x * inCols) / outCols;
            const int ix2 = ((x + 1) * inCols) / outCols;
            const float fx1 = (ix1 + 1) - x * dx;
            const float fx2 = (x + 1) * dx - ix2;

            float pixVal = 0.0f;
            float factorx, factory;
            for (int i = iy1; i <= iy2 && i < inRows; i++) {
                if (i == iy1) {
                    factory = fy1;    // We're just getting the bottom edge of this pixel
                } else if (i == iy2) {
                    factory = fy2;    // We're just getting the top edge of this pixel
                } else {
                    factory = 1.0f;   // We've got the full height of this pixel
                }
                for (int j = ix1; j <= ix2 && j < inCols; j++) {
                    if (j == ix1) {
                        factorx = fx1;    // We've just got the right edge of this pixel
                    } else if (j == ix2) {
                        factorx = fx2;    // We've just got the left edge of this pixel
                    } else {
                        factorx = 1.0f;   // We've got the full width of this pixel
                    }
                    pixVal += data[j + i * inCols] * factorx * factory;
                }
            }
            res[x + y * outCols] = pixVal * normalize; // Normalize by the area of the new pixel
        }
    }
}

void matrix_downsample_simple(const int inCols, const int inRows, const float* const data, float* const res) {
    const int outRows = inRows / 2;
    const int outCols = inCols / 2;

    // Simplified downsampling by Bruce Guenter:
    //
    // Follows exactly the same math as the full downsampling above,
    // except that inRows and inCols are known to be even. This allows
    // for all of the boundary cases to be eliminated, reducing the
    // sampling to a simple average.
    //#pragma omp parallel for schedule(static)
    for (int y = 0; y < outRows; y++) {
        const int iy1 = y * 2;
        const float* datap = data + iy1 * inCols;
        float* resp = res + y * outCols;
        for (int x = 0; x < outCols; x++) {
            const int ix1 = x * 2;
            resp[x] = (datap[ix1] + datap[ix1 + 1] + datap[ix1 + inCols] + datap[ix1 + 1 + inCols]) / 4.0f;
        }
    }
}

void matrix_downsample(const int inCols, const int inRows, const float* const data, float* const res) {
    if (inCols % 2 == 0 && inRows % 2 == 0) {
        matrix_downsample_simple(inCols, inRows, data, res);
    } else {
        matrix_downsample_full(inCols, inRows, data, res);
    }
}

// b = a + b
inline void matrix_add(const int n, const float* const a, float* const b) {
    //#pragma omp parallel for schedule(static)
    for (int i = 0; i < n; i++) {
        b[i] += a[i];
    }
}

// b = a - b
inline void matrix_subtract(const int n, const float* const a, float* const b) {
    //#pragma omp parallel for schedule(static)
    for (int i = 0; i < n; i++) {
        b[i] = a[i] - b[i];
    }
}

// copy matrix a to b, b = a
inline void matrix_copy(const int n, const float* const a, float* const b) {
    memcpy(b, a, sizeof(float) * n);
}

// multiply matrix a by scalar val
inline void matrix_multiply_const(const int n, float* const a, const float val) {
    //#pragma omp parallel for schedule(static)
    for (int i = 0; i < n; i++) {
        a[i] *= val;
    }
}

// b[i] = a[i] / b[i]
inline void matrix_divide(const int n, const float* a, float* b) {
    //#pragma omp parallel for schedule(static)
    for (int i = 0; i < n; i++) {
        b[i] = a[i] / b[i];
    }
}

// alloc memory for the float table
inline float* matrix_alloc(int size) {
    float* m = (float*)malloc(sizeof(float) * size);
    if (m == NULL) {
        fprintf(stderr, "ERROR: malloc in matrix_alloc() (size:%d)", size);
        exit((int)155);
    }
    return m;
}

// free memory for matrix
inline void matrix_free(float* m) {
    if (m != NULL) {
        free(m);
        //_mm_free(m);
    } else {
        /* free(NULL) would be legal, but a NULL argument here almost always
           indicates a bookkeeping error in the caller. */
        fprintf(stderr, "ERROR: matrix_free() called with a NULL pointer");
    }
}

// multiply vector by vector (each vector should have one dimension equal to 1)
float matrix_DotProduct(const int n, const float* a, const float* b) {
    float val = 0;
    //#pragma omp parallel for reduction(+:val) schedule(static)
    for (int j = 0; j < n; ++j) {
        val += a[j] * b[j];
    }
    return val;
}

// set zeros for matrix elements
inline void matrix_zero(int n, float* m) {
    //bzero(m, n*sizeof(float));
    memset(m, 0, n * sizeof(float));
}

// Davide Anastasia <davideanastasia@users.sourceforge.net> (2010 08 31)
// calculate divergence of two gradient maps (Gx and Gy)
// divG(x,y) = Gx(x,y) - Gx(x-1,y) + Gy(x,y) - Gy(x,y-1)
void calculate_and_add_divergence(const int COLS, const int ROWS, const float* Gx, const float* Gy, float* divG) {
    float divGx, divGy;
    //#pragma omp parallel sections private(divGx, divGy)
    {
        //#pragma omp section
        {
            // kx = 0 AND ky = 0;
            divG[0] += Gx[0] + Gy[0];    // OUT

            // ky = 0
            for (int kx = 1; kx < COLS; kx++) {
                divGx = Gx[kx] - Gx[kx - 1];
                divGy = Gy[kx];
                divG[kx] += divGx + divGy;    // OUT
            }
        }
        //#pragma omp section
        {
            //#pragma omp parallel for schedule(static, 5120) private(divGx, divGy)
            for (int ky = 1; ky < ROWS; ky++) {
                // kx = 0
                divGx = Gx[ky * COLS];
                divGy = Gy[ky * COLS] - Gy[ky * COLS - COLS];
                divG[ky * COLS] += divGx + divGy;    // OUT

                // kx > 0
                for (int kx = 1; kx < COLS; kx++) {
                    divGx = Gx[kx + ky * COLS] - Gx[kx + ky * COLS - 1];
                    divGy = Gy[kx + ky * COLS] - Gy[kx + ky * COLS - COLS];
                    divG[kx + ky * COLS] += divGx + divGy;    // OUT
                }
            }
        }
    } // END PARALLEL SECTIONS
}

// Calculate the sum of divergences for all pyramid levels.
// The smaller divergence map is upsampled and added to the divergence map
// of the next finer level of the pyramid.
void pyramid_calculate_divergence_sum(pyramid_t* pyramid, float* divG_sum) {
    float* temp = matrix_alloc((pyramid->rows * pyramid->cols) / 4);

    // Find the coarsest pyramid, and the number of pyramid levels
    bool swap = true;
    while (pyramid->next != NULL) {
        swap = (!swap);
        pyramid = pyramid->next;
    }

    // For every level, we swap temp and divG_sum
    // So, if there are an odd number of levels...
if (swap) { swap_pointers(divG_sum, temp); } if (pyramid) { matrix_zero(pyramid->rows * pyramid->cols, temp); calculate_and_add_divergence(pyramid->cols, pyramid->rows, pyramid->Gx, pyramid->Gy, temp); swap_pointers(divG_sum, temp); pyramid = pyramid->prev; } while (pyramid) { matrix_upsample(pyramid->cols, pyramid->rows, divG_sum, temp); calculate_and_add_divergence(pyramid->cols, pyramid->rows, pyramid->Gx, pyramid->Gy, temp); swap_pointers(divG_sum, temp); pyramid = pyramid->prev; } matrix_free(temp); } // calculate scale factors (Cx,Cy) for gradients (Gx,Gy) // C is equal to EDGE_WEIGHT for gradients smaller than GFIXATE or 1.0 otherwise inline void calculate_scale_factor(const int n, const float* const G, float* const C) { // float GFIXATE = 0.1f; // float EDGE_WEIGHT = 0.01f; const float detectT = 0.001f; const float a = 0.038737f; const float b = 0.537756f; //#pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { //#if 1 const float g = max(detectT, fabsf(G[i])); C[i] = 1.0f / (a * powf(g, b)); //#else // if(fabsf(G[i]) < GFIXATE) // C[i] = 1.0f / EDGE_WEIGHT; // else // C[i] = 1.0f; //#endif } } // calculate scale factor for the whole pyramid void pyramid_calculate_scale_factor(pyramid_t* pyramid, pyramid_t* pC) { while (pyramid != NULL) { calculate_scale_factor(pyramid->rows * pyramid->cols, pyramid->Gx, pC->Gx); calculate_scale_factor(pyramid->rows * pyramid->cols, pyramid->Gy, pC->Gy); pyramid = pyramid->next; pC = pC->next; } } // Scale gradient (Gx and Gy) by C (Cx and Cy) // G = G * C inline void scale_gradient(const int n, float* G, const float* C) { //#pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { G[i] *= C[i]; } } // scale gradients for the whole one pyramid with the use of (Cx,Cy) from the other pyramid void pyramid_scale_gradient(pyramid_t* pyramid, pyramid_t* pC) { while (pyramid != NULL) { scale_gradient(pyramid->rows * pyramid->cols, pyramid->Gx, pC->Gx); scale_gradient(pyramid->rows * pyramid->cols, pyramid->Gy, pC->Gy); pyramid = pyramid->next; pC = pC->next; } } // free memory allocated for the pyramid void pyramid_free(pyramid_t* pyramid) { pyramid_t* t_next; // = pyramid->next; while (pyramid) { t_next = pyramid->next; if (pyramid->Gx != NULL) { matrix_free(pyramid->Gx); //free(pyramid->Gx); pyramid->Gx = NULL; } if (pyramid->Gy != NULL) { matrix_free(pyramid->Gy); //free(pyramid->Gy); pyramid->Gy = NULL; } //pyramid->prev = NULL; //pyramid->next = NULL; free(pyramid); pyramid = t_next; } } // allocate memory for the pyramid pyramid_t* pyramid_allocate(int cols, int rows) { pyramid_t* level = NULL; pyramid_t* pyramid = NULL; pyramid_t* prev = NULL; while (rows >= PYRAMID_MIN_PIXELS && cols >= PYRAMID_MIN_PIXELS) { level = (pyramid_t*) malloc(sizeof(pyramid_t)); if (level == NULL) { fprintf(stderr, "ERROR: malloc in pyramid_alloc() (size:%zu)", sizeof(pyramid_t)); exit((int)155); } memset(level, 0, sizeof(pyramid_t)); level->rows = rows; level->cols = cols; const int size = level->rows * level->cols; level->Gx = matrix_alloc(size); level->Gy = matrix_alloc(size); level->prev = prev; if (prev != NULL) { prev->next = level; } prev = level; if (pyramid == NULL) { pyramid = level; } rows /= 2; cols /= 2; } return pyramid; } // calculate gradients //TODO: check this implementation in Linux, where the OMP is enabled! 
inline void calculate_gradient(const int COLS, const int ROWS, const float* const lum, float* const Gx, float* const Gy) {
    int Y_IDX, IDX;

    //#pragma omp parallel for schedule(static) private(Y_IDX, IDX)
    for (int ky = 0; ky < ROWS - 1; ky++) {
        Y_IDX = ky * COLS;
        for (int kx = 0; kx < COLS - 1; kx++) {
            IDX = Y_IDX + kx;
            Gx[IDX] = lum[IDX + 1] - lum[IDX];
            Gy[IDX] = lum[IDX + COLS] - lum[IDX];
        }
        Gx[Y_IDX + COLS - 1] = 0.0f; // last column (kx = COLS - 1)
        Gy[Y_IDX + COLS - 1] = lum[Y_IDX + COLS - 1 + COLS] - lum[Y_IDX + COLS - 1];
    }

    // last row (ky = ROWS - 1)
    for (int kx = 0; kx < (COLS - 1); kx++) {
        IDX = (ROWS - 1) * COLS + kx;
        Gx[IDX] = lum[IDX + 1] - lum[IDX];
        Gy[IDX] = 0.0f;
    }

    // last row & last col = last element
    Gx[ROWS * COLS - 1] = 0.0f;
    Gy[ROWS * COLS - 1] = 0.0f;
}

void swap_pointers(float* &pOne, float* &pTwo) {
    float* pTemp = pOne;
    pOne = pTwo;
    pTwo = pTemp;
}

// calculate gradients for the pyramid
// lum_temp WILL NOT BE overwritten!
void pyramid_calculate_gradient(const pyramid_t* pyramid, const float* Y /*lum_temp*/) {
    float* buffer1 = matrix_alloc((pyramid->rows * pyramid->cols) / 4);  // /4
    float* buffer2 = matrix_alloc((pyramid->rows * pyramid->cols) / 16); // /16
    float* p_t1 = buffer1;
    float* p_t2 = buffer2;

    calculate_gradient(pyramid->cols, pyramid->rows, Y, pyramid->Gx, pyramid->Gy);

    pyramid_t* py_curr = pyramid->next;
    if (py_curr) { // guard first: a single-level pyramid has no next level,
                   // so py_curr->prev must not be touched unconditionally
        pyramid_t* py_prev = py_curr->prev;
        matrix_downsample(py_prev->cols, py_prev->rows, Y, p_t1);
        calculate_gradient(py_curr->cols, py_curr->rows, p_t1, py_curr->Gx, py_curr->Gy);
        py_prev = py_curr;
        py_curr = py_curr->next;

        while (py_curr) {
            matrix_downsample(py_prev->cols, py_prev->rows, p_t1, p_t2);
            calculate_gradient(py_curr->cols, py_curr->rows, p_t2, py_curr->Gx, py_curr->Gy);
            swap_pointers(p_t1, p_t2); // swap buffers for the next level down
            py_prev = py_curr;
            py_curr = py_curr->next;
        }
    }

    matrix_free(buffer1);
    matrix_free(buffer2);
}

// x = -0.25 * b
inline void solveX(const int n, const float* const b, float* const x) {
    //#pragma omp parallel for schedule(static)
    for (int i = 0; i < n; i++) {
        x[i] = (-0.25f) * b[i];
    }
}

// divG_sum = A * x = sum(divG(x))
inline void multiplyA(pyramid_t* px, pyramid_t* pC, const float* const x, float* divG_sum) {
    pyramid_calculate_gradient(px, x);              // x won't be changed
    pyramid_scale_gradient(px, pC);                 // scale gradients by Cx,Cy from main pyramid
    pyramid_calculate_divergence_sum(px, divG_sum); // calculate the sum of divergences
}

// bi-conjugate linear equation solver
// overwrites pyramid!
void linbcg(pyramid_t* pyramid, pyramid_t* pC, float* b, float* x, const int itmax, const float tol) { const int rows = pyramid->rows; const int cols = pyramid->cols; const int n = rows * cols; const float tol2 = tol * tol; float* const z = matrix_alloc(n); float* const zz = matrix_alloc(n); float* const p = matrix_alloc(n); float* const pp = matrix_alloc(n); float* const r = matrix_alloc(n); float* const rr = matrix_alloc(n); float* const x_save = matrix_alloc(n); const float bnrm2 = matrix_DotProduct(n, b, b); multiplyA(pyramid, pC, x, r); // r = A*x = divergence(x) matrix_subtract(n, b, r); // r = b - r float err2 = matrix_DotProduct(n, r, r); // err2 = r.r multiplyA(pyramid, pC, r, rr); // rr = A*r float bkden = 0; float saved_err2 = err2; matrix_copy(n, x, x_save); // const float ierr2 = err2; // const float percent_sf = 100.0f / logf(tol2 * bnrm2 / ierr2); int iter = 0; bool reset = true; int num_backwards = 0; const int num_backwards_ceiling = 3; for (; iter < itmax; ++iter) { // ph->newValue( (int) (logf(err2 / ierr2)*percent_sf) ); // if (ph->isTerminationRequested()) { //user request abort // break; // } solveX(n, r, z); // z = ~A(-1) * r = -0.25 * r solveX(n, rr, zz); // zz = ~A(-1) * rr = -0.25 * rr const float bknum = matrix_DotProduct(n, z, rr); if (reset) { reset = false; matrix_copy(n, z, p); matrix_copy(n, zz, pp); } else { const float bk = bknum / bkden; // beta = ... //#pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { p[i] = z[i] + bk * p[i]; pp[i] = zz[i] + bk * pp[i]; } } bkden = bknum; // numerato becomes the dominator for the next iteration // slow! multiplyA(pyramid, pC, p, z); // z = A* p = divergence( p) multiplyA(pyramid, pC, pp, zz); // zz = A*pp = divergence(pp) const float ak = bknum / matrix_DotProduct(n, z, pp); // alfa = ... //#pragma omp parallel for schedule(static) for (int i = 0 ; i < n ; i++) { r[i] -= ak * z[i]; // r = r - alfa * z rr[i] -= ak * zz[i]; //rr = rr - alfa * zz } const float old_err2 = err2; err2 = matrix_DotProduct(n, r, r); // Have we gone unstable? if (err2 > old_err2) { // Save where we've got to if it's the best yet if (num_backwards == 0 && old_err2 < saved_err2) { saved_err2 = old_err2; matrix_copy(n, x, x_save); } num_backwards++; } else { num_backwards = 0; } //#pragma omp parallel for schedule(static) for (int i = 0 ; i < n ; i++) { x[i] += ak * p[i]; // x = x + alfa * p } if (num_backwards > num_backwards_ceiling) { // Reset reset = true; num_backwards = 0; // Recover saved value matrix_copy(n, x_save, x); // r = Ax multiplyA(pyramid, pC, x, r); // r = b - r matrix_subtract(n, b, r); // err2 = r.r err2 = matrix_DotProduct(n, r, r); saved_err2 = err2; // rr = A*r multiplyA(pyramid, pC, r, rr); } // fprintf(stderr, "iter:%d err:%f\n", iter+1, sqrtf(err2/bnrm2)); if (err2 / bnrm2 < tol2) { break; } } // Use the best version we found if (err2 > saved_err2) { err2 = saved_err2; matrix_copy(n, x_save, x); } if (err2 / bnrm2 > tol2) { // Not converged // ph->newValue( (int) (logf(err2 / ierr2)*percent_sf)); if (iter == itmax) { MSG(" Warning: Not converged (hit maximum iterations), error = %f (should be below %f).\n", sqrtf(err2 / bnrm2), tol); } else { MSG(" Warning: Not converged (going unstable), error = %f (should be below %f).\n", sqrtf(err2 / bnrm2), tol); } } else { // ph->newValue(100); } matrix_free(x_save); matrix_free(p); matrix_free(pp); matrix_free(z); matrix_free(zz); matrix_free(r); matrix_free(rr); } // conjugate linear equation solver // overwrites pyramid! 
// This version is a slightly modified version by Davide Anastasia <davideanastasia@users.sourceforge.net> // March 25, 2011 void lincg(pyramid_t* pyramid, pyramid_t* pC, const float* const b, float* const x, const int itmax, const float tol) { const int num_backwards_ceiling = 3; float rdotr_curr, rdotr_prev, rdotr_best; float alpha, beta; #ifdef TIMER_PROFILING msec_timer f_timer; f_timer.start(); #endif const int rows = pyramid->rows; const int cols = pyramid->cols; const int n = rows * cols; const float tol2 = tol * tol; float* const x_best = matrix_alloc(n); float* const r = matrix_alloc(n); float* const p = matrix_alloc(n); float* const Ap = matrix_alloc(n); // bnrm2 = ||b|| const float bnrm2 = matrix_DotProduct(n, b, b); // r = b - Ax multiplyA(pyramid, pC, x, r); // r = A x matrix_subtract(n, b, r); // r = b - r // rdotr = r.r rdotr_best = rdotr_curr = matrix_DotProduct(n, r, r); // Setup initial vector matrix_copy(n, r, p); // p = r matrix_copy(n, x, x_best); // const float irdotr = rdotr; // const float percent_sf = 100.0f / logf(tol2 * bnrm2 / irdotr); int iter = 0; int num_backwards = 0; for (; iter < itmax; iter++) { // TEST //ph->newValue((int)(logf(rdotr_curr / irdotr)*percent_sf)); // User requested abort //if (ph->isTerminationRequested() && iter > 0) { // break; //} // Ap = A p multiplyA(pyramid, pC, p, Ap); // alpha = r.r / (p . Ap) alpha = rdotr_curr / matrix_DotProduct(n, p, Ap); // r = r - alpha Ap //#pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { r[i] -= alpha * Ap[i]; } // rdotr = r.r rdotr_prev = rdotr_curr; rdotr_curr = matrix_DotProduct(n, r, r); // Have we gone unstable? if (rdotr_curr > rdotr_prev) { // Save where we've got to if (num_backwards == 0 && rdotr_prev < rdotr_best) { rdotr_best = rdotr_prev; matrix_copy(n, x, x_best); } num_backwards++; } else { num_backwards = 0; } // x = x + alpha * p //#pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { x[i] += alpha * p[i]; } // Exit if we're done // fprintf(stderr, "iter:%d err:%f\n", iter+1, sqrtf(rdotr/bnrm2)); if (rdotr_curr / bnrm2 < tol2) { break; } if (num_backwards > num_backwards_ceiling) { // Reset num_backwards = 0; matrix_copy(n, x_best, x); // r = Ax multiplyA(pyramid, pC, x, r); // r = b - r matrix_subtract(n, b, r); // rdotr = r.r rdotr_best = rdotr_curr = matrix_DotProduct(n, r, r); // p = r matrix_copy(n, r, p); } else { // p = r + beta p beta = rdotr_curr / rdotr_prev; //#pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { p[i] = r[i] + beta * p[i]; } } } // Use the best version we found if (rdotr_curr > rdotr_best) { rdotr_curr = rdotr_best; matrix_copy(n, x_best, x); } if (rdotr_curr / bnrm2 > tol2) { // Not converged //ph->newValue((int)(logf(rdotr_curr / irdotr)*percent_sf)); if (iter == itmax) { MSG(" Warning: Not converged (hit maximum iterations), error = %f (should be below %f).\n", sqrtf(rdotr_curr / bnrm2), tol); } else { MSG(" Warning: Not converged (going unstable), error = %f (should be below %f).\n", sqrtf(rdotr_curr / bnrm2), tol); } } else { // ph->newValue(100); } matrix_free(x_best); matrix_free(p); matrix_free(Ap); matrix_free(r); } // conjugate linear equation solver // overwrites pyramid! 
//void lincg(pyramid_t* pyramid, pyramid_t* pC, const float* const b, float* const x, const int itmax, const float tol, ProgressHelper *ph) //{ // const int num_backwards_ceiling = 3; // //#ifdef TIMER_PROFILING // //msec_timer f_timer; // //f_timer.start(); //#endif // // const int rows = pyramid->rows; // const int cols = pyramid->cols; // const int n = rows*cols; // const float tol2 = tol*tol; // // float* const x_save = matrix_alloc(n); // float* const r = matrix_alloc(n); // float* const p = matrix_alloc(n); // float* const Ap = matrix_alloc(n); // // // bnrm2 = ||b|| // const float bnrm2 = matrix_DotProduct(n, b, b); // // // r = b - Ax // multiplyA(pyramid, pC, x, r); // matrix_subtract(n, b, r); // // // rdotr = r.r // float rdotr = matrix_DotProduct(n, r, r); // // // p = r // matrix_copy(n, r, p); // // // Setup initial vector // float saved_rdotr = rdotr; // matrix_copy(n, x, x_save); // // const float irdotr = rdotr; // const float percent_sf = 100.0f/logf(tol2*bnrm2/irdotr); // int iter = 0; // int num_backwards = 0; // // for (; iter < itmax; iter++) // { // // TEST // ph->newValue( (int) (logf(rdotr/irdotr)*percent_sf) ); // if (ph->isTerminationRequested() && iter > 0 ) // User requested abort // break; // // // Ap = A p // multiplyA(pyramid, pC, p, Ap); // // // alpha = r.r / (p . Ap) // const float alpha = rdotr / matrix_DotProduct(n, p, Ap); // // // r = r - alpha Ap //#ifdef __SSE__ // VEX_vsubs(r, alpha, Ap, r, n); //#else // //#pragma omp parallel for schedule(static) // for (int i = 0; i < n; i++) // r[i] -= alpha * Ap[i]; //#endif // // // rdotr = r.r // const float old_rdotr = rdotr; // rdotr = matrix_DotProduct(n, r, r); // // // Have we gone unstable? // if (rdotr > old_rdotr) // { // // Save where we've got to // if (num_backwards == 0 && old_rdotr < saved_rdotr) // { // saved_rdotr = old_rdotr; // matrix_copy(n, x, x_save); // } // // num_backwards++; // } // else // { // num_backwards = 0; // } // // // x = x + alpha * p //#ifdef __SSE__ // VEX_vadds(x, alpha, p, x, n); //#else // //#pragma omp parallel for schedule(static) // for (int i = 0; i < n; i++) // x[i] += alpha * p[i]; //#endif // // // Exit if we're done // // fprintf(stderr, "iter:%d err:%f\n", iter+1, sqrtf(rdotr/bnrm2)); // if (rdotr/bnrm2 < tol2) // break; // // if (num_backwards > num_backwards_ceiling) // { // // Reset // num_backwards = 0; // matrix_copy(n, x_save, x); // // // r = Ax // multiplyA(pyramid, pC, x, r); // // // r = b - r // matrix_subtract(n, b, r); // // // rdotr = r.r // rdotr = matrix_DotProduct(n, r, r); // saved_rdotr = rdotr; // // // p = r // matrix_copy(n, r, p); // } // else // { // // p = r + beta p // const float beta = rdotr/old_rdotr; //#ifdef __SSE__ // VEX_vadds(r, beta, p, p, n); //#else // //#pragma omp parallel for schedule(static) // for (int i = 0; i < n; i++) // p[i] = r[i] + beta*p[i]; //#endif // } // } // // // Use the best version we found // if (rdotr > saved_rdotr) // { // rdotr = saved_rdotr; // matrix_copy(n, x_save, x); // } // // if (rdotr/bnrm2 > tol2) // { // // Not converged // ph->newValue( (int) (logf(rdotr/irdotr)*percent_sf)); // if (iter == itmax) // fprintf(stderr, "\npfstmo_mantiuk06: Warning: Not converged (hit maximum iterations), error = %g (should be below %g).\n", sqrtf(rdotr/bnrm2), tol); // else // fprintf(stderr, "\npfstmo_mantiuk06: Warning: Not converged (going unstable), error = %g (should be below %g).\n", sqrtf(rdotr/bnrm2), tol); // } // else // ph->newValue(100); // // matrix_free(x_save); // matrix_free(p); // 
matrix_free(Ap); // matrix_free(r); // //#ifdef TIMER_PROFILING // //f_timer.stop_and_update(); // //std::cout << "lincg() = " << f_timer.get_time() << " msec" << std::endl; //#endif //} // in_tab and out_tab should contain inccreasing float values inline float lookup_table(const int n, const float* const in_tab, const float* const out_tab, const float val) { if ((val < in_tab[0])) { return out_tab[0]; } for (int j = 1; j < n; ++j) { if (val < in_tab[j]) { const float dd = (val - in_tab[j - 1]) / (in_tab[j] - in_tab[j - 1]); return out_tab[j - 1] + (out_tab[j] - out_tab[j - 1]) * dd; } } return out_tab[n - 1]; } // transform gradient (Gx,Gy) to R inline void transform_to_R(const int n, float* G, float detail_factor) { const float log10 = 2.3025850929940456840179914546844 * detail_factor; //#pragma omp parallel for schedule(static, 1024) for (int j = 0; j < n; j++) { // G to W float Curr_G = G[j]; if (Curr_G < 0.0f) { Curr_G = -(powf(10, (-Curr_G) * log10) - 1.0f); } else { Curr_G = (powf(10, Curr_G * log10) - 1.0f); } // W to RESP if (Curr_G < 0.0f) { Curr_G = -lookup_table(LOOKUP_W_TO_R, W_table, R_table, -Curr_G); } else { Curr_G = lookup_table(LOOKUP_W_TO_R, W_table, R_table, Curr_G); } G[j] = Curr_G; } } // transform from R to G inline void transform_to_G(const int n, float* const R, float detail_factor) { //here we are actually changing the base of logarithm const float log10 = 2.3025850929940456840179914546844 * detail_factor; //#pragma omp parallel for schedule(static,1024) for (int j = 0; j < n; j++) { float Curr_R = R[j]; // RESP to W if (Curr_R < 0.0f) { Curr_R = -lookup_table(LOOKUP_W_TO_R, R_table, W_table, -Curr_R); } else { Curr_R = lookup_table(LOOKUP_W_TO_R, R_table, W_table, Curr_R); } // W to G if (Curr_R < 0.0f) { Curr_R = -log((-Curr_R) + 1.0f) / log10; } else { Curr_R = log(Curr_R + 1.0f) / log10; } R[j] = Curr_R; } } // transform gradient (Gx,Gy) to R for the whole pyramid inline void pyramid_transform_to_R(pyramid_t* pyramid, float detail_factor) { while (pyramid != NULL) { transform_to_R(pyramid->rows * pyramid->cols, pyramid->Gx, detail_factor); transform_to_R(pyramid->rows * pyramid->cols, pyramid->Gy, detail_factor); pyramid = pyramid->next; } } // transform from R to G for the pyramid inline void pyramid_transform_to_G(pyramid_t* pyramid, float detail_factor) { while (pyramid != NULL) { transform_to_G(pyramid->rows * pyramid->cols, pyramid->Gx, detail_factor); transform_to_G(pyramid->rows * pyramid->cols, pyramid->Gy, detail_factor); pyramid = pyramid->next; } } // multiply gradient (Gx,Gy) values by float scalar value for the whole pyramid inline void pyramid_gradient_multiply(pyramid_t* pyramid, const float val) { while (pyramid != NULL) { matrix_multiply_const(pyramid->rows * pyramid->cols, pyramid->Gx, val); matrix_multiply_const(pyramid->rows * pyramid->cols, pyramid->Gy, val); pyramid = pyramid->next; } } int sort_float(const void* const v1, const void* const v2) { if (*((float*)v1) < *((float*)v2)) { return -1; } if ((*((float*)v1) > *((float*)v2))) { return 1; } return 0; } // transform gradients to luminance void transform_to_luminance(pyramid_t* pp, float* x, const bool bcg, const int itmax, const float tol) { pyramid_t* pC = pyramid_allocate(pp->cols, pp->rows); pyramid_calculate_scale_factor(pp, pC); // calculate (Cx,Cy) pyramid_scale_gradient(pp, pC); // scale small gradients by (Cx,Cy); float* b = matrix_alloc(pp->cols * pp->rows); pyramid_calculate_divergence_sum(pp, b); // calculate the sum of divergences (equal to b) MSG(" cg-lum-grad-solve"); 
// calculate luminances from gradients if (bcg) { linbcg(pp, pC, b, x, itmax, tol); } else { lincg(pp, pC, b, x, itmax, tol); } matrix_free(b); pyramid_free(pC); } struct hist_data { float size; float cdf; int index; }; int hist_data_order(const void* const v1, const void* const v2) { if (((struct hist_data*) v1)->size < ((struct hist_data*) v2)->size) { return -1; } if (((struct hist_data*) v1)->size > ((struct hist_data*) v2)->size) { return 1; } return 0; } int hist_data_index(const void* const v1, const void* const v2) { return ((struct hist_data*) v1)->index - ((struct hist_data*) v2)->index; } void contrast_equalization(pyramid_t* pp, const float contrastFactor) { // Count sizes int total_pixels = 0; pyramid_t* l = pp; while (l != NULL) { total_pixels += l->rows * l->cols; l = l->next; } // Allocate memory struct hist_data* hist = (struct hist_data*) malloc(sizeof(struct hist_data) * total_pixels); if (hist == NULL) { fprintf(stderr, "ERROR: malloc in contrast_equalization() (size:%zu)", sizeof(struct hist_data) * total_pixels); exit((int)155); } // Build histogram info l = pp; int index = 0; while (l != NULL) { const int pixels = l->rows * l->cols; const int offset = index; //#pragma omp parallel for schedule(static) for (int c = 0; c < pixels; ++c) { hist[c + offset].size = sqrtf(l->Gx[c] * l->Gx[c] + l->Gy[c] * l->Gy[c]); hist[c + offset].index = c + offset; } index += pixels; l = l->next; } // Generate histogram qsort(hist, total_pixels, sizeof(struct hist_data), hist_data_order); // Calculate cdf const float norm = 1.0f / (float) total_pixels; //#pragma omp parallel for schedule(static) for (int i = 0; i < total_pixels; ++i) { hist[i].cdf = ((float) i) * norm; } // Recalculate in terms of indexes qsort(hist, total_pixels, sizeof(struct hist_data), hist_data_index); //Remap gradient magnitudes l = pp; index = 0; while (l != NULL) { const int pixels = l->rows * l->cols; const int offset = index; //#pragma omp parallel for schedule(static) for (int c = 0; c < pixels; ++c) { const float scale = contrastFactor * hist[c + offset].cdf / hist[c + offset].size; l->Gx[c] *= scale; l->Gy[c] *= scale; } index += pixels; l = l->next; } free(hist); } // tone mapping /** * @brief: Tone mapping algorithm [Mantiuk2006] * * @param R red channel * @param G green channel * @param B blue channel * @param Y luminance channel * @param contrastFactor contrast scaling factor (in 0-1 range) * @param saturationFactor color desaturation (in 0-1 range) * @param bcg true if to use BiConjugate Gradients, false if to use Conjugate Gradients * @param itmax maximum number of iterations for convergence (typically 50) * @param tol tolerence to get within for convergence (typically 1e-3) * @return PFSTMO_OK if tone-mapping was sucessful, PFSTMO_ABORTED if * it was stopped from a callback function and PFSTMO_ERROR if an * error was encountered. 
*/ int tmo_mantiuk06_contmap(int c, int r, float* R, float* G, float* B, float* Y, const float contrastFactor, const float saturationFactor, float detailfactor, const bool bcg, const int itmax, const float tol) { const int n = c * r ; MSG(" normalize"); /* Normalize */ float Ymax = Y[0]; for (int j = 1; j < n; ++j) { if (Y[j] > Ymax) { Ymax = Y[j]; } } const float clip_min = 1e-7f * Ymax; //#pragma omp parallel for schedule(static) for (int j = 0; j < n; ++j) { if ((R[j] < clip_min)) { R[j] = clip_min; } if ((G[j] < clip_min)) { G[j] = clip_min; } if ((B[j] < clip_min)) { B[j] = clip_min; } if ((Y[j] < clip_min)) { Y[j] = clip_min; } } //#pragma omp parallel for schedule(static) for (int j = 0; j < n; ++j) { R[j] /= Y[j]; G[j] /= Y[j]; B[j] /= Y[j]; Y[j] = log10f(Y[j]); } MSG(" gradient-pyramid"); pyramid_t* pp = pyramid_allocate(c, r); // create pyramid pyramid_calculate_gradient(pp, Y); // calculate gradients for pyramid (Y won't be changed) pyramid_transform_to_R(pp, detailfactor); // transform gradients to R MSG(" contrast-pyramid"); /* Contrast map */ if (contrastFactor > 0.0f) { pyramid_gradient_multiply(pp, contrastFactor); // Contrast mapping } else { contrast_equalization(pp, -contrastFactor); // Contrast equalization } MSG(" pyramid-transform"); pyramid_transform_to_G(pp, detailfactor); // transform R to gradients MSG(" contrast-luminance"); transform_to_luminance(pp, Y, bcg, itmax, tol); // transform gradients to luminance Y pyramid_free(pp); MSG(" re-normalize"); /* Renormalize luminance */ float* temp = matrix_alloc(n); matrix_copy(n, Y, temp); // copy Y to temp qsort(temp, n, sizeof(float), sort_float); // sort temp in ascending order const float CUT_MARGIN = 0.1f; float trim; float delta; trim = (n - 1) * CUT_MARGIN * 0.01f; delta = trim - floorf(trim); const float l_min = temp[(int)floorf(trim)] * delta + temp[(int)ceilf(trim)] * (1.0f - delta); trim = (n - 1) * (100.0f - CUT_MARGIN) * 0.01f; delta = trim - floorf(trim); const float l_max = temp[(int)floorf(trim)] * delta + temp[(int)ceilf(trim)] * (1.0f - delta); matrix_free(temp); const float disp_dyn_range = 2.3f; //#pragma omp parallel for schedule(static) for (int j = 0; j < n; ++j) { Y[j] = (Y[j] - l_min) / (l_max - l_min) * disp_dyn_range - disp_dyn_range; // x scaled } MSG(" rgb convert"); /* Transform to linear scale RGB */ //#pragma omp parallel for schedule(static) for (int j = 0; j < n; ++j) { Y[j] = powf(10, Y[j]); R[j] = powf(R[j], saturationFactor) * Y[j]; G[j] = powf(G[j], saturationFactor) * Y[j]; B[j] = powf(B[j], saturationFactor) * Y[j]; } return 1;//PFSTMO_OK; } //#endif // __TMO_H__ /** * @brief Frederic Drago logmapping operator * * Adaptive logarithmic mapping for displaying high contrast * scenes. * F. Drago, K. Myszkowski, T. Annen, and N. Chiba. In Eurographics 2003. * * This file is a part of LuminanceHDR package, based on pfstmo. * ---------------------------------------------------------------------- * Copyright (C) 2003,2004 Grzegorz Krawczyk * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * ---------------------------------------------------------------------- * * @author Grzegorz Krawczyk, <krawczyk@mpi-sb.mpg.de> * * $Id: tmo_drago03.cpp,v 1.4 2008/11/04 23:43:08 rafm Exp $ */ #include <math.h> // #include "TonemappingOperators/pfstmo.h" // #include "tmo_drago03.h" /// Type of algorithm #define FAST 0 inline float biasFunc(float b, float x) { return powf(x, b); // pow(x, log(bias)/log(0.5) } //------------------------------------------- void calculateLuminance(unsigned int width, unsigned int height, const float* Y, float& avLum, float& maxLum) { avLum = 0.0f; maxLum = 0.0f; int size = width * height; for (int i = 0 ; i < size; i++) { avLum += log(Y[i] + 1e-4); maxLum = (Y[i] > maxLum) ? Y[i] : maxLum ; } avLum = exp((float)(avLum / size)); } // Y = in | L = out void tmo_drago03(unsigned int width, unsigned int height, const float* Y, float* L, float bias ) { const float LOG05 = -0.693147f; // log(0.5) float maxLum, avLum; calculateLuminance(width, height, Y, avLum, maxLum); int nrows = height; // image size int ncols = width; maxLum /= avLum; // normalize maximum luminance by average luminance float divider = log10(maxLum + 1.0f); float biasP = logf(bias) / LOG05; #if !FAST // Normal tone mapping of every pixel for (int y = 0 ; y < nrows; y++) { // ph->newValue(100 * y / nrows); // if (ph->isTerminationRequested()) { // break; // } for (int x = 0 ; x < ncols; x++) { float Yw = Y[x + y * width] / avLum; float interpol = logf(2.0f + biasFunc(biasP, Yw / maxLum) * 8.0f); L[x + y* width] = (logf(Yw + 1.0f) / interpol) / divider; } } #else // Approximation of log(x+1) // x(6+x)/(6+4x) good if x < 1 // x*(6 + 0.7662x)/(5.9897 + 3.7658x) between 1 and 2 // http://users.pandora.be/martin.brown/home/consult/logx.htm int i, j; for (int y = 0; y < nrows; y += 3) { for (int x = 0; x < ncols; x += 3) { float average = 0.0f; for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) { average += (*Y)(x + i, y + j) / avLum; } average = average / 9.0f - (*Y)(x, y); if (average > -1.0f && average < 1.0f) { float interpol = log(2.0f + biasFunc(biasP, (*Y)(x + 1, y + 1) / maxLum) * 8.0f); for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) { float Yw = (*Y)(x + i, y + j); if (Yw < 1.0f) { float L = Yw * (6.0f + Yw) / (6.0f + 4.0f * Yw); Yw = (L / interpol) / divider; } else if (Yw >= 1.0f && Yw < 2.0f) { float L = Yw * (6.0f + 0.7662 * Yw) / (5.9897f + 3.7658f * Yw); Yw = (L / interpol) / divider; } else { Yw = (log(Yw + 1.0f) / interpol) / divider; } (*L)(x + i, y + j) = Yw; } } else { for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) { float Yw = (*Y)(x + i, y + j); float interpol = log(2.0f + biasFunc(biasP, Yw / maxLum) * 8.0f); (*L)(x + i, y + j) = (log(Yw + 1.0f) / interpol) / divider; } } } } #endif // #else #ifndef FAST }
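// --- Illustration (not part of the original file) ----------------------------
// A minimal, self-contained sketch of the per-pixel Drago03 mapping coded
// above: L = log(Yw + 1) / log(2 + 8 * (Yw/Ywmax)^(log(bias)/log(0.5))),
// normalized by log10(Ywmax + 1). The names drago_pixel and Ywmax are
// illustrative only; Yw is luminance already divided by the scene average.
#include <cmath>
#include <cstdio>

static float drago_pixel(float Yw, float Ywmax, float bias)
{
  const float LOG05 = -0.693147f;                 // log(0.5)
  const float biasP = logf(bias) / LOG05;
  const float divider = log10f(Ywmax + 1.0f);
  const float interpol = logf(2.0f + powf(Yw / Ywmax, biasP) * 8.0f);
  return (logf(Yw + 1.0f) / interpol) / divider;
}

int main()
{
  const float Ywmax = 100.0f;
  const float samples[] = {0.1f, 1.0f, 10.0f, 100.0f};
  for (float Yw : samples) {
    std::printf("Yw = %6.1f -> L = %.4f\n", Yw, drago_pixel(Yw, Ywmax, 0.85f));
  }
  return 0;
}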
plot.h
#ifndef OPENMC_PLOT_H #define OPENMC_PLOT_H #include <unordered_map> #include <sstream> #include "pugixml.hpp" #include "xtensor/xarray.hpp" #include "hdf5.h" #include "openmc/position.h" #include "openmc/constants.h" #include "openmc/cell.h" #include "openmc/geometry.h" #include "openmc/particle.h" #include "openmc/xml_interface.h" namespace openmc { //=============================================================================== // Global variables //=============================================================================== class Plot; namespace model { extern std::vector<Plot> plots; //!< Plot instance container extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index } // namespace model //=============================================================================== // RGBColor holds color information for plotted objects //=============================================================================== struct RGBColor { //Constructors RGBColor() : red(0), green(0), blue(0) { }; RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { }; RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { }; RGBColor(const std::vector<int> &v) { if (v.size() != 3) { throw std::out_of_range("Incorrect vector size for RGBColor."); } red = v[0]; green = v[1]; blue = v[2]; } bool operator ==(const RGBColor& other) { return red == other.red && green == other.green && blue == other.blue; } // Members uint8_t red, green, blue; }; // some default colors const RGBColor WHITE {255, 255, 255}; const RGBColor RED {255, 0, 0}; typedef xt::xtensor<RGBColor, 2> ImageData; struct IdData { // Constructor IdData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids }; struct PropertyData { // Constructor PropertyData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data }; enum class PlotType { slice = 1, voxel = 2 }; enum class PlotBasis { xy = 1, xz = 2, yz = 3 }; enum class PlotColorBy { cells = 0, mats = 1 }; //=============================================================================== // Plot class //=============================================================================== class PlotBase { public: template<class T> T get_map() const; // Members public: Position origin_; //!< Plot origin in geometry Position width_; //!< Plot width in geometry PlotBasis basis_; //!< Plot basis (XY/XZ/YZ) std::array<size_t, 3> pixels_; //!< Plot size in pixels bool color_overlaps_; //!< Show overlapping cells? int level_; //!< Plot universe level }; template<class T> T PlotBase::get_map() const { size_t width = pixels_[0]; size_t height = pixels_[1]; // get pixel size double in_pixel = (width_[0])/static_cast<double>(width); double out_pixel = (width_[1])/static_cast<double>(height); // size data array T data(width, height); // setup basis indices and initial position centered on pixel int in_i, out_i; Position xyz = origin_; switch(basis_) { case PlotBasis::xy : in_i = 0; out_i = 1; break; case PlotBasis::xz : in_i = 0; out_i = 2; break; case PlotBasis::yz : in_i = 1; out_i = 2; break; #ifdef __GNUC__ default: __builtin_unreachable(); #endif } // set initial position xyz[in_i] = origin_[in_i] - width_[0] / 2. 
+ in_pixel / 2.;
  xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;

  // arbitrary direction
  Direction dir = {0.7071, 0.7071, 0.0};

#pragma omp parallel
  {
    Particle p;

    p.r() = xyz;
    p.u() = dir;
    p.coord_[0].universe = model::root_universe;
    int level = level_;
    int j {};

#pragma omp for
    for (int y = 0; y < height; y++) {
      p.r()[out_i] = xyz[out_i] - out_pixel * y;
      for (int x = 0; x < width; x++) {
        p.r()[in_i] = xyz[in_i] + in_pixel * x;
        p.n_coord_ = 1;
        // local variables
        bool found_cell = find_cell(&p, 0);
        j = p.n_coord_ - 1;
        if (level >= 0) { j = level + 1; }
        if (found_cell) {
          data.set_value(y, x, p, j);
        }
        if (color_overlaps_ && check_cell_overlap(&p, false)) {
          data.set_overlap(y, x);
        }
      } // inner for
    } // outer for
  } // omp parallel

  return data;
}

class Plot : public PlotBase
{

public:
  // Constructor
  Plot(pugi::xml_node plot);

// Methods
private:
  void set_id(pugi::xml_node plot_node);
  void set_type(pugi::xml_node plot_node);
  void set_output_path(pugi::xml_node plot_node);
  void set_bg_color(pugi::xml_node plot_node);
  void set_basis(pugi::xml_node plot_node);
  void set_origin(pugi::xml_node plot_node);
  void set_width(pugi::xml_node plot_node);
  void set_universe(pugi::xml_node plot_node);
  void set_default_colors(pugi::xml_node plot_node);
  void set_user_colors(pugi::xml_node plot_node);
  void set_meshlines(pugi::xml_node plot_node);
  void set_mask(pugi::xml_node plot_node);
  void set_overlap_color(pugi::xml_node plot_node);

// Members
public:
  int id_;                        //!< Plot ID
  PlotType type_;                 //!< Plot type (Slice/Voxel)
  PlotColorBy color_by_;          //!< Plot coloring (cell/material)
  int meshlines_width_;           //!< Width of lines added to the plot
  int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
  RGBColor meshlines_color_;      //!< Color of meshlines on the plot
  RGBColor not_found_ {WHITE};    //!< Plot background color
  RGBColor overlap_color_ {RED};  //!< Plot overlap color
  std::vector<RGBColor> colors_;  //!< Plot colors
  std::string path_plot_;         //!< Plot output filename
};

//===============================================================================
// Non-member functions
//===============================================================================

//! Add mesh lines to image data of a plot object
//! \param[in] pl plot object
//! \param[in,out] data image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);

//! Write a ppm image to file using a plot object's image data
//! \param[in] pl plot object
//! \param[in] data image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);

//! Initialize a voxel file
//! \param[in] file_id id of an open hdf5 file
//! \param[in] dims dimensions of the voxel file (dx, dy, dz)
//! \param[out] dspace dataspace pointer to voxel data
//! \param[out] dset dataset pointer to voxel data
//! \param[out] memspace pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
                hid_t* dset, hid_t* memspace);

//! Write a section of the voxel data to hdf5
//! \param[in] x index of the voxel slice
//! \param[in] dspace dataspace pointer to voxel data
//! \param[in] dset dataset pointer to voxel data
//! \param[in] memspace pointer to memory space of voxel data
//! \param[in] buf pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset, hid_t memspace,
                       void* buf);

//! Close voxel file entities
//! \param[in] dspace data space to close
//! \param[in] dset dataset to close
//! \param[in] memspace memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);

//===============================================================================
// External functions
//===============================================================================

//! Read plot specifications from a plots.xml file
void read_plots_xml();

//! Create a ppm image for a plot object
//! \param[in] pl plot object
void create_ppm(Plot pl);

//! Create an hdf5 voxel file for a plot object
//! \param[in] pl plot object
void create_voxel(Plot pl);

//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();

} // namespace openmc
#endif // OPENMC_PLOT_H
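// --- Illustration (not part of the original header) ---------------------------
// Minimal sketch of the pixel-center arithmetic used by PlotBase::get_map():
// the first pixel center sits half a pixel inside the plot edge, the
// horizontal coordinate grows with x and the vertical coordinate shrinks
// with y. All names below are local to the sketch, not OpenMC API.
#include <cstdio>

int main()
{
  const double origin[2] = {0.0, 0.0};  // plot center on the two basis axes
  const double width[2] = {10.0, 10.0}; // plot extent
  const int pixels[2] = {4, 4};         // horizontal / vertical resolution

  const double in_pixel = width[0] / pixels[0];
  const double out_pixel = width[1] / pixels[1];
  const double in0 = origin[0] - width[0] / 2. + in_pixel / 2.;
  const double out0 = origin[1] + width[1] / 2. - out_pixel / 2.;

  for (int y = 0; y < pixels[1]; y++) {
    for (int x = 0; x < pixels[0]; x++) {
      std::printf("(%5.2f, %5.2f) ", in0 + in_pixel * x, out0 - out_pixel * y);
    }
    std::printf("\n");
  }
  return 0;
}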
SimpleThread.h
#pragma once

#include "Core.h"
#include "Misc/ScopeRWLock.h"

class FSimpleThread : public FRunnable
{
public:
  FSimpleThread(const FString& TheName) : Name(TheName)
  {
    RunnableThread = FRunnableThread::Create(this, *Name);
    Log(__FUNCTION__);
  }

  virtual ~FSimpleThread() override
  {
    if (RunnableThread)
    {
      RunnableThread->WaitForCompletion();
      delete RunnableThread;
      RunnableThread = nullptr;
      Log(__FUNCTION__);
    }
  }

  virtual bool Init() override
  {
    Log(__FUNCTION__);
    return true;
  }

  virtual uint32 Run() override
  {
    while (!bStop)
    {
      FPlatformProcess::Sleep(1);
      Log(__FUNCTION__);
    }
    return 0;
  }

  virtual void Exit() override
  {
    Log(__FUNCTION__);
  }

  virtual void Stop() override
  {
    bStop = true;
    if (RunnableThread) RunnableThread->WaitForCompletion();
  }

  void Log(const char* Action)
  {
    uint32 CurrentThreadId = FPlatformTLS::GetCurrentThreadId();
    FString CurrentThreadName = FThreadManager::Get().GetThreadName(CurrentThreadId);
    if (RunnableThread)
    {
      UE_LOG(LogTemp, Display, TEXT("%s@%s[%d] - %s,%d, %s"), *Name, *CurrentThreadName,
             CurrentThreadId, *RunnableThread->GetThreadName(), RunnableThread->GetThreadID(),
             ANSI_TO_TCHAR(Action));
    }
    else
    {
      UE_LOG(LogTemp, Display, TEXT("%s@%s[%d] - %s,%d, %s"), *Name, *CurrentThreadName,
             CurrentThreadId, TEXT("NULL"), 0, ANSI_TO_TCHAR(Action));
    }
  }

public:
  FString Name;
  FRunnableThread* RunnableThread = nullptr;
  FThreadSafeBool bStop;
};

class ThreadSafeArray
{
public:
  int32 GetValue(int32 Index)
  {
    FScopeLock Lock(&CS);
    return Values[Index];
  }

  void AppendValue(int32 Value)
  {
    CS.Lock();
    Values.Add(Value);
    CS.Unlock();
  }

private:
  FCriticalSection CS;
  TArray<int32> Values;
};

class ThreadSafeArray2
{
public:
  int32 GetValue(int32 Index)
  {
    FRWScopeLock ScopeLock(ValuesLock, SLT_ReadOnly);
    return Values[Index];
  }

  void AppendValue(int32 Value)
  {
    ValuesLock.WriteLock();
    Values.Add(Value);
    ValuesLock.WriteUnlock();
  }

private:
  FRWLock ValuesLock;
  TArray<int32> Values;
};

/////////////////////////////////////////////////////////////////////////////////

#define SAFE_DELETE(Ptr) if (Ptr) { delete Ptr; Ptr = nullptr; }

inline void DumpAllThreads(const char* Log)
{
  FThreadManager::Get().ForEachThread(
    [=](uint32 ThreadID, FRunnableThread* Thread)
    {
      UE_LOG(LogTemp, Display, TEXT("%s: %s,%u"), ANSI_TO_TCHAR(Log), *Thread->GetThreadName(), ThreadID);
    });
}

inline void Test_SimpleThread()
{
  // Create Threads
  FSimpleThread* SimpleThread1 = new FSimpleThread(TEXT("SimpleThread1"));
  FSimpleThread* SimpleThread2 = new FSimpleThread(TEXT("SimpleThread2"));
  DumpAllThreads(__FUNCTION__);

  // Ticks
  int TickCount = 100;
  for (int i = 0; i < TickCount; ++i)
  {
    // Consume
    UE_LOG(LogTemp, Display, TEXT("Tick[%d] ........"), i);
    FPlatformProcess::Sleep(0.1);
  }

  // Stop Threads
  SimpleThread1->Stop();
  SimpleThread2->Stop();

  // Destroy Threads
  SAFE_DELETE(SimpleThread1);
  SAFE_DELETE(SimpleThread2);
}

inline void Test_Atomic()
{
  TAtomic<int> Counter;
  Counter++;          // Atomic increment -> FPlatformAtomics::InterlockedIncrement
  if (Counter.Load()) // Atomic read -> FPlatformAtomics::AtomicRead
  {
  }

  FThreadSafeCounter Counter2;
  Counter2.Increment();          // FPlatformAtomics::InterlockedIncrement
  Counter2.Decrement();          // FPlatformAtomics::InterlockedDecrement
  if (Counter2.GetValue() == 0)  // FPlatformAtomics::AtomicRead
  {
  }
}

inline void Test_Event1()
{
  FEvent* SyncEvent = nullptr;
  Async(EAsyncExecution::Thread, [&SyncEvent]()
  {
    FPlatformProcess::Sleep(3);
    if (SyncEvent)
    {
      SyncEvent->Trigger();
      UE_LOG(LogTemp, Display, TEXT("Trigger ....."));
    }
  });

  SyncEvent = FPlatformProcess::GetSynchEventFromPool(true);
  SyncEvent->Wait((uint32)-1);
  FPlatformProcess::ReturnSynchEventToPool(SyncEvent);
  UE_LOG(LogTemp, Display, TEXT("Over ....."));
}

inline void Test_Event2()
{
  FEventRef SyncEvent(EEventMode::AutoReset);
  FEvent* Event = SyncEvent.operator->();
  Async(EAsyncExecution::Thread, [Event]()
  {
    FPlatformProcess::Sleep(3);
    Event->Trigger();
    UE_LOG(LogTemp, Display, TEXT("Trigger ....."));
  });

  SyncEvent->Wait((uint32)-1);
  UE_LOG(LogTemp, Display, TEXT("Over ....."));
}

inline void Test_Event()
{
  // waiting..
  {
    FScopedEvent SyncEvent;
    Async(EAsyncExecution::Thread, [&SyncEvent]()
    {
      FPlatformProcess::Sleep(3);
      SyncEvent.Trigger();
      UE_LOG(LogTemp, Display, TEXT("Trigger ....."));
    });
  }
  UE_LOG(LogTemp, Display, TEXT("Over ....."));
}

//////////////////////////////////////////////////
// #include <omp.h>
inline void Test_OpenMP()
{
  static long num_rects = 1000000; // pi = Int(4/(1+x^2))
  double mid, height, sum = 0;
  int i = 0;
  double area = 0;
  const double width = 1. / (double)num_rects; // shared: computed once before the loop

  // "reduction" (not "reduce") combines the per-thread partial sums;
  // mid and height are per-thread scratch variables.
#pragma omp parallel for private(mid, height) reduction(+:sum)
  for (i = 0; i < num_rects; i++)
  {
    mid = (i + 0.5) * width;
    height = 4.0 / (1. + mid * mid);
    sum += height;
  }
  area = width * sum;
  UE_LOG(LogTemp, Display, TEXT("Pi is : %f"), area);
}

//////////////////////////////////////////////////
par_strength.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 *****************************************************************************/

/* following should be in a header file */

#include "_hypre_parcsr_ls.h"
#include "hypre_hopscotch_hash.h"

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix". This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric. This is because of the directional nature of the
  "strength of dependence" notion (see below). Since we are using
  nonsymmetric storage for A right now, this is not a problem. If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix. We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A. To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $-a_{ij} \ge \theta \max_{l \ne j} \{-a_{il}\}$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength". We also retain the "natural" graph
  notation for representing the directed graph of a matrix. That is,
  the nonzero entry $a_{ij}$ is represented as: i --> j. In the strength
  matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
  and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
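  (Worked example: with $\theta = 0.25$ and off-diagonal entries
  $\{-4, -1, -0.5\}$ in row $i$, the threshold is $0.25 \cdot 4 = 1$, so the
  connections of magnitude $4$ and $1$ are strong while the $0.5$ connection
  is weak.)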
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateS(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; HYPRE_Int *prefix_sum_workspace; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
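 *
 * (In the code below, a weak connection is recorded by storing -1 in
 * S_temp_diag_j / S_temp_offd_j; the subsequent "compression" pass copies
 * only the surviving column indices into S_diag_j / S_offd_j.)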
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); HYPRE_Int *S_temp_offd_j = NULL; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) col_map_offd_S[i] = col_map_offd_A[i]; } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, A_diag_data[jA]); 
row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } /* diag >= 0*/ } /* num_functions <= 1 */ jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1; jS_offd += A_offd_i[i + 1] - A_offd_i[i]; /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1); for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } jS_offd -= A_offd_i[i + 1] - A_offd_i[i]; } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { 
S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions <= 1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ for (i = start; i < stop; i++) { S_diag_i[i] += jS_diag; S_offd_i[i] += jS_offd; jS = S_diag_i[i]; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_temp_diag_j[jA] > -1) { S_diag_j[jS] = S_temp_diag_j[jA]; jS++; } } jS = S_offd_i[i]; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_temp_offd_j[jA] > -1) { S_offd_j[jS] = S_temp_offd_j[jA]; jS++; } } } /* for each variable */ } /* omp parallel */ hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables]; hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables]; hypre_CSRMatrixJ(S_diag) = S_diag_j; hypre_CSRMatrixJ(S_offd) = S_offd_j; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime(); #endif return (ierr); } /* Create Strength matrix from CF marker array data. Provides a more general form to build S for specific nodes of the 'global' matrix (for example, F points or A_FF part), given the entire matrix. These nodes have the SMRK tag. Currently assumes num_functions == 1, hence separate routine is used for now. Could possibly be merged with BoomerAMGCreateS() to yield a more general function. 
*/ HYPRE_Int hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int *CF_marker, HYPRE_Int SMRK, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jj, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *prefix_sum_workspace; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); /* #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_nonzeros_diag; i++) S_diag_j[i] = 0; */ HYPRE_Int *S_temp_offd_j = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); /* #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_nonzeros_offd; i++) S_offd_j[i] = 0; */ HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) col_map_offd_S[i] = col_map_offd_A[i]; } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { if (CF_marker[i] == SMRK) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < 
A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0*/ jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1; jS_offd += A_offd_i[i + 1] - A_offd_i[i]; /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; S_temp_diag_j[jA] = -1; } jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1); for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } jS_offd -= A_offd_i[i + 1] - A_offd_i[i]; } else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = jj; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker[jj] == SMRK) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = jj; } } else { S_temp_offd_j[jA] = -1; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = jj; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker[jj] == SMRK) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = jj; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* CF_marker == SMRK */ else { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1; jS_offd += A_offd_i[i + 1] - A_offd_i[i]; for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1); for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } jS_offd -= A_offd_i[i + 1] - A_offd_i[i]; } /* CF_marker != SMRK */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. 
 *----------------------------------------------------------------*/

      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;
   hypre_ParCSRMatrixCommPkg(S) = NULL;
   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix". This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric. This is because of the directional nature of the
  "strength of dependence" notion (see below). Since we are using
  nonsymmetric storage for A right now, this is not a problem. If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix. We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A. To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $|a_{ij}| \ge \theta \max_{l \ne j} |a_{il}|$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength". We also retain the "natural" graph
  notation for representing the directed graph of a matrix. That is,
  the nonzero entry $a_{ij}$ is represented as: i --> j. In the strength
  matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
  and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /* give S same nonzero structure as A */ hypre_ParCSRMatrixCopy(A,S,0); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_variables; i++) { diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } /* compute row entries of S */ S_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_diag_j[jA] = -1; } for (jA = 
A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_offd_j[jA] = -1; } } else { if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } } } /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_diag_j[jA] > -1) { S_diag_j[jS] = S_diag_j[jA]; jS++; } } } S_diag_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_diag) = jS; /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_offd_j[jA] > -1) { S_offd_j[jS] = S_offd_j[jA]; jS++; } } } S_offd_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_offd) = jS; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return (ierr); } /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int **col_offd_S_to_A_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommPkg *comm_pkg_S; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int *recv_procs_S; HYPRE_Int *recv_vec_starts_S; HYPRE_Int *send_procs_S; HYPRE_Int *send_map_starts_S; HYPRE_Int *send_map_elmts_S = NULL; HYPRE_BigInt *big_send_map_elmts_S = NULL; HYPRE_Int *col_offd_S_to_A; HYPRE_Int *S_marker; HYPRE_Int *send_change; HYPRE_Int *recv_change; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_S; HYPRE_Int i, j, jcol; HYPRE_Int proc, cnt, proc_cnt, total_nz; HYPRE_BigInt first_row; HYPRE_Int ierr = 0; HYPRE_Int num_sends_A = 
hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int num_sends_S; HYPRE_Int num_recvs_S; HYPRE_Int num_nonzeros; num_nonzeros = S_offd_i[num_variables]; S_marker = NULL; if (num_cols_offd_A) S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_A; i++) S_marker[i] = -1; for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_marker[jcol] = 0; } proc = 0; proc_cnt = 0; cnt = 0; num_recvs_S = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (!S_marker[j]) { S_marker[j] = cnt; cnt++; proc = 1; } } if (proc) {num_recvs_S++; proc = 0;} } num_cols_offd_S = cnt; recv_change = NULL; recv_procs_S = NULL; send_change = NULL; if (col_map_offd_S) hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST); col_map_offd_S = NULL; col_offd_S_to_A = NULL; if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST); if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST); if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST); recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1, HYPRE_MEMORY_HOST); if (num_cols_offd_S) { col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); } if (num_cols_offd_S < num_cols_offd_A) { for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_offd_j[i] = S_marker[jcol]; } proc = 0; proc_cnt = 0; cnt = 0; recv_vec_starts_S[0] = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (S_marker[j] != -1) { col_map_offd_S[cnt] = col_map_offd_A[j]; col_offd_S_to_A[cnt++] = j; proc = 1; } } recv_change[i] = j-cnt-recv_vec_starts_A[i] +recv_vec_starts_S[proc_cnt]; if (proc) { recv_procs_S[proc_cnt++] = recv_procs_A[i]; recv_vec_starts_S[proc_cnt] = cnt; proc = 0; } } } else { for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { col_map_offd_S[j] = col_map_offd_A[j]; col_offd_S_to_A[j] = j; } recv_procs_S[i] = recv_procs_A[i]; recv_vec_starts_S[i] = recv_vec_starts_A[i]; } recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A+num_recvs_A, HYPRE_MEMORY_HOST); j=0; for (i=0; i < num_sends_A; i++) hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i], 0,comm,&requests[j++]); for (i=0; i < num_recvs_A; i++) hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i], 0,comm,&requests[j++]); status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST); hypre_MPI_Waitall(j,requests,status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); num_sends_S = 0; total_nz = send_map_starts_A[num_sends_A]; for (i=0; i < num_sends_A; i++) { if (send_change[i]) { if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i]) num_sends_S++; } else num_sends_S++; total_nz -= send_change[i]; } send_procs_S = NULL; if (num_sends_S) send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST); send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S+1, HYPRE_MEMORY_HOST); send_map_elmts_S = NULL; if (total_nz) { send_map_elmts_S = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST); } proc = 0; proc_cnt = 0; for (i=0; i < num_sends_A; i++) { cnt 
= send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i]; if (cnt) { send_procs_S[proc_cnt++] = send_procs_A[i]; send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt; } } comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_S) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S; hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S; hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S; hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S; comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S); hypre_ParCSRCommHandleDestroy(comm_handle); first_row = hypre_ParCSRMatrixFirstRowIndex(A); if (first_row) for (i=0; i < send_map_starts_S[num_sends_S]; i++) send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i]-first_row); hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S; hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S; hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S; hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(send_change, HYPRE_MEMORY_HOST); hypre_TFree(recv_change, HYPRE_MEMORY_HOST); *col_offd_S_to_A_ptr = col_offd_S_to_A; return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points * for second coarsening pass in aggressive coarsening (S*S+2S) *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j=NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int *tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = 
NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1]-1; if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1]; hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = coarse_row_starts[my_id]; my_last_cpt = coarse_row_starts[my_id+1]-1; global_num_coarse = coarse_row_starts[num_procs]; #endif if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) num_coarse_private++; } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp 
parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end+1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i through adding number of coarse row-elements of offd and diag * for corresponding rows. S_int_i[j+1] contains the number of coarse elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) index++; } for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) index++; } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]); if (num_nonzeros) S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i=0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { jrow = send_map_elmts[j]; for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]]+my_first_cpt; } for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } tmp_send_map_starts[i+1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! 
* evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i=0; i < recv_vec_starts[num_recvs]; i++) S_ext_i[i+1] += S_ext_i[i]; num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_recv_vec_starts[0] = 0; for (i=0; i < num_recvs; i++) tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]]; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,S_int_j,S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16*hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else S_ext_diag_size_private++; } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_big_offd_j[S_ext_offd_size_private++] = big_i1; //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); //S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, 
HYPRE_MEMORY_HOST); if (num_cols_offd_C) hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i=0; i < num_cols_offd_S; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) S_ext_offd_size++; else S_ext_diag_size++; } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i=0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) num_coarse_offd++; for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_ext_j[cnt_offd++] = big_i1; else S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; if (S_ext_offd_size || num_coarse_offd) hypre_TFree(temp, HYPRE_MEMORY_HOST); for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else map_S_to_C[i] = -1; } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); if (num_cols_offd_C) S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse*my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C*my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse*my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C*my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = NULL; if (num_coarse) S_marker = S_marker_array + num_coarse*my_thread_num; if (num_cols_offd_C) S_marker_offd = S_marker_offd_array + num_cols_offd_C*my_thread_num; for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j = hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if (C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic+1] == C_diag_i[ic] && C_offd_i[ic+1] == C_offd_i[ic]) 
CF_marker[coarse_to_fine[ic]] = 2; C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2*(my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2*(my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); hypre_ParCSRMatrixOwnsRowStarts(S2) = 0; C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = 
hypre_ParCSRMatrixOffd(S2); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST); } hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST); } if (num_cols_offd_S) { hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); } *C_ptr = S2; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime(); #endif hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST); return 0; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarker(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker) { HYPRE_Int i, cnt; cnt = 0; for (i=0; i < num_var; i++) { if (CF_marker[i] > 0 ) { if (CF_marker[i] == 1) CF_marker[i] = new_CF_marker[cnt++]; else { CF_marker[i] = 1; cnt++;} } } return 0; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. coarsening, * but marks new F-points (previous C-points) as -2 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarker2(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker) { HYPRE_Int i, cnt; cnt = 0; for (i=0; i < num_var; i++) { if (CF_marker[i] > 0 ) { if (new_CF_marker[cnt] == -1) CF_marker[i] = -2; else CF_marker[i] = 1; cnt++; } } return 0; }
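/*--------------------------------------------------------------------------
 * Editor's note (illustrative sketch, NOT part of hypre): the symbolic/numeric
 * structure of hypre_BoomerAMGCreate2ndS above is a classic two-pass sparse
 * pattern computation: a first pass counts the nonzeros of every row of S*S
 * using a marker array, and a second pass fills in the column indices with
 * the same marker convention (marker[j] < row_begin  <=>  column j not yet
 * seen in the current row, so the marker never needs resetting between rows).
 * The sketch below shows that core pattern for C = A*A on a plain CSR matrix;
 * all names (spgemm_pattern_sketch, ia, ja, ...) are hypothetical, and the
 * CF_marker/num_paths filtering as well as the hypre_prefix_sum-based
 * thread parallelization of the real routine are deliberately omitted.
 *--------------------------------------------------------------------------*/
#include <stdlib.h>

static void spgemm_pattern_sketch(int n, const int *ia, const int *ja,
                                  int **ic_out, int **jc_out)
{
   int *marker = (int *) malloc(n*sizeof(int));
   int *ic = (int *) malloc((n+1)*sizeof(int));
   int *jc;
   int i, p, q, nnz;

   for (i = 0; i < n; i++) marker[i] = -1;

   /* Pass 1 (symbolic): count the nonzeros of each row of C = A*A. */
   nnz = 0;
   ic[0] = 0;
   for (i = 0; i < n; i++)
   {
      int row_begin = nnz; /* plays the role of jj_row_begin_diag above */
      for (p = ia[i]; p < ia[i+1]; p++)
      {
         int k = ja[p];
         for (q = ia[k]; q < ia[k+1]; q++)
         {
            int j = ja[q];
            /* marker[j] < row_begin  <=>  j not yet seen in row i */
            if (marker[j] < row_begin) { marker[j] = nnz++; }
         }
      }
      ic[i+1] = nnz;
   }

   /* Pass 2 (numeric): fill the column indices with the same marker trick. */
   jc = (int *) malloc(nnz*sizeof(int));
   for (i = 0; i < n; i++) marker[i] = -1;
   nnz = 0;
   for (i = 0; i < n; i++)
   {
      int row_begin = nnz;
      for (p = ia[i]; p < ia[i+1]; p++)
      {
         int k = ja[p];
         for (q = ia[k]; q < ia[k+1]; q++)
         {
            int j = ja[q];
            if (marker[j] < row_begin) { marker[j] = nnz; jc[nnz++] = j; }
         }
      }
   }
   free(marker);
   *ic_out = ic;
   *jc_out = jc;
}
/* Usage sketch: for the 3x3 cyclic shift with ia = {0,1,2,3}, ja = {1,2,0},
 * the routine returns ic = {0,1,2,3} and jc = {2,0,1}, i.e. the pattern of
 * the squared shift; this corresponds to what the counting and filling
 * passes of the real routine compute before the num_paths filter. */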
likelihoods.h
/*!
* This file is part of GPBoost a C++ library for combining
*	boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_LIKELIHOODS_
#define GPB_LIKELIHOODS_

#define _USE_MATH_DEFINES // for M_SQRT1_2 and M_PI
#include <cmath>

#include <GPBoost/type_defs.h>
#include <GPBoost/sparse_matrix_utils.h>

#include <string>
#include <set>
#include <vector>

#include <LightGBM/utils/log.h>
using LightGBM::Log;

//Mathematical constants usually defined in cmath
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif
//sqrt(2)
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif
//1/sqrt(2)
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524401
#endif
//2/sqrt(pi)
#ifndef M_2_SQRTPI
#define M_2_SQRTPI 1.12837916709551257390
#endif

#include <chrono> // only for debugging
#include <thread> // only for debugging
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
//std::chrono::steady_clock::time_point begin, end;// only for debugging
//double el_time;
//end = std::chrono::steady_clock::now();// only for debugging
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("TOTAL TIME for mode calculation: %g", el_time);// Only for debugging

namespace GPBoost {

	/*!
	* \brief This class implements the likelihoods for the Gaussian processes
	*	The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
	*/
	template<typename T_mat, typename T_chol>
	class Likelihood {
	public:
		/*! \brief Constructor */
		Likelihood();

		/*!
		* \brief Constructor
		* \param type Type of likelihood
		* \param num_data Number of data points
		* \param num_re Number of random effects
		*/
		Likelihood(string_t type, data_size_t num_data, data_size_t num_re) {
			string_t likelihood = ParseLikelihoodAlias(type);
			if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
				Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
			}
			likelihood_type_ = likelihood;
			num_data_ = num_data;
			num_re_ = num_re;
			if (likelihood_type_ == "gamma") {
				aux_pars_ = { 1. };//shape parameter, TODO: also estimate this parameter
			}
			chol_fact_pattern_analyzed_ = false;
		}

		/*!
		* \brief Initialize the mode vector mode_ and the auxiliary derivative vectors (used in the Laplace approximation for non-Gaussian data)
		*/
		void InitializeModeAvec() {
			mode_ = vec_t::Zero(num_re_);
			mode_previous_value_ = vec_t::Zero(num_re_);
			mode_initialized_ = true;
			first_deriv_ll_ = vec_t(num_data_);
			second_deriv_neg_ll_ = vec_t(num_data_);
		}

		/*!
		* \brief Reset the mode to its previous value. This is used if too large step sizes were taken which resulted in an increase of the objective function; the new values (covariance parameters and linear coefficients) are then discarded and, consequently, the mode is also reset to its previous value
		*/
		void ResetModeToPreviousValue() {
			CHECK(mode_initialized_);
			mode_ = mode_previous_value_;
		}

		/*! \brief Destructor */
		~Likelihood() {
		}

		/*!
		* \brief Returns the type of likelihood
		*/
		string_t GetLikelihood() const {
			return(likelihood_type_);
		}
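		// Editor's note: a minimal usage sketch (not part of GPBoost). It assumes
		// the typedefs from GPBoost/type_defs.h (den_mat_t / chol_den_mat_t for
		// dense matrices, sp_mat_t / chol_sp_mat_t for sparse ones); num_data,
		// num_re, y_int, and location_par are hypothetical caller-side variables:
		//
		//   Likelihood<den_mat_t, chol_den_mat_t> lik("bernoulli_logit", num_data, num_re);
		//   lik.CalculateNormalizingConstant(y_int.data(), num_data);
		//   double ll = lik.LogLikelihood(nullptr, y_int.data(), location_par.data(), num_data);
		//
		// For binary likelihoods only y_data_int is read, hence the nullptr for
		// y_data; CalculateNormalizingConstant must be called before LogLikelihood.

		/*!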
		* \brief Set the type of likelihood
		* \param type Likelihood name
		*/
		void SetLikelihood(const string_t& type) {
			string_t likelihood = ParseLikelihoodAlias(type);
			if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
				Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
			}
			likelihood_type_ = likelihood;
			chol_fact_pattern_analyzed_ = false;
		}

		/*!
		* \brief Returns the type of the response variable (label). Either "double" or "int"
		*/
		string_t label_type() const {
			if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit" ||
				likelihood_type_ == "poisson") {
				return("int");
			}
			else {
				return("double");
			}
		}

		/*!
		* \brief Checks whether the response variables (labels) have the correct values
		* \param y_data Response variable data
		* \param num_data Number of data points
		*/
		template <typename T>//T can be double or float
		void CheckY(const T* y_data, const data_size_t num_data) const {
			if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit") {
				//#pragma omp parallel for schedule(static)//problematic with error message below...
				for (data_size_t i = 0; i < num_data; ++i) {
					if (fabs(y_data[i]) >= EPSILON_ && !AreSame<T>(y_data[i], 1.)) {
						Log::REFatal("Response variable (label) data needs to be 0 or 1 for likelihood of type '%s'.", likelihood_type_.c_str());
					}
				}
			}
			else if (likelihood_type_ == "poisson") {
				for (data_size_t i = 0; i < num_data; ++i) {
					if (y_data[i] < 0) {
						Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
					}
					else {
						double intpart;
						if (std::modf(y_data[i], &intpart) != 0.0) {
							Log::REFatal("Found non-integer response variable. Response variable can only be integer valued for likelihood of type '%s'.", likelihood_type_.c_str());
						}
					}
				}
			}
			else if (likelihood_type_ == "gamma") {
				for (data_size_t i = 0; i < num_data; ++i) {
					if (y_data[i] < 0) {
						Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
					}
				}
			}
		}

		/*!
		* \brief Calculate normalizing constant for (log-)likelihood calculation
		* \param y_data Response variable data
		* \param num_data Number of data points
		*/
		template <typename T>//T can be double or int
		void CalculateNormalizingConstant(const T* y_data, const data_size_t num_data) {
			if (likelihood_type_ == "poisson") {
				double log_normalizing_constant = 0.;
#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
				for (data_size_t i = 0; i < num_data; ++i) {
					if (y_data[i] > 1) {
						double log_factorial = 0.;
						for (int k = 2; k <= y_data[i]; ++k) {
							log_factorial += std::log(k);
						}
						log_normalizing_constant += log_factorial;
					}
				}
				log_normalizing_constant_ = log_normalizing_constant;
			}
			else if (likelihood_type_ == "gamma") {
				////Currently not used since aux_pars_[0]==1 and thus log_normalizing_constant_==0
				//double log_normalizing_constant = 0.;
				//#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
				//for (data_size_t i = 0; i < num_data; ++i) {
				//	log_normalizing_constant += -(aux_pars_[0] - 1.) * std::log(y_data[i]) - aux_pars_[0] * std::log(aux_pars_[0]) + std::tgamma(aux_pars_[0]);
				//}
				//log_normalizing_constant_ = log_normalizing_constant;
				log_normalizing_constant_ = 0. * y_data[0];//y_data[0] is just a trick to avoid compiler warnings complaining about unreferenced parameters...
			}
			normalizing_constant_has_been_calculated_ = true;
		}
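		// Editor's note (illustration, not GPBoost code): in the Poisson branch
		// above, the inner loop accumulates log(y_i!) = sum_{k=2}^{y_i} log(k).
		// Since log(y!) = lgamma(y + 1), the same additive constant could also
		// be obtained in O(1) per observation with the log-gamma function from
		// <cmath>, which is already included here, e.g.
		//
		//   double log_factorial = std::lgamma(y_data[i] + 1.0);
		//
		// Either way, the term only shifts the Poisson log-likelihood by a
		// constant that does not depend on the model parameters.

		/*!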
		* \brief Evaluate the log-likelihood conditional on the location parameter (location_par = latent random effects plus fixed effects)
		* \param y_data Response variable data if response variable is continuous
		* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
		* \param location_par Location parameter (random plus fixed effects)
		* \param num_data Number of data points
		*/
		double LogLikelihood(const double* y_data, const int* y_data_int,
			const double* location_par, const data_size_t num_data) {
			if (!normalizing_constant_has_been_calculated_) {
				Log::REFatal("The normalizing constant has not been calculated. Call 'CalculateNormalizingConstant' first.");
			}
			double ll = 0.;
			if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
				for (data_size_t i = 0; i < num_data; ++i) {
					if (y_data_int[i] == 0) {
						ll += std::log(1 - normalCDF(location_par[i]));
					}
					else {
						ll += std::log(normalCDF(location_par[i]));
					}
				}
			}
			else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
				for (data_size_t i = 0; i < num_data; ++i) {
					ll += y_data_int[i] * location_par[i] - std::log(1 + std::exp(location_par[i]));
					//Alternative version:
					//if (y_data_int[i] == 0) {
					//	ll += std::log(1 - CondMeanLikelihood(location_par[i]));//CondMeanLikelihood = logistic function
					//}
					//else {
					//	ll += std::log(CondMeanLikelihood(location_par[i]));
					//}
				}
			}
			else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static) reduction(+:ll)
				for (data_size_t i = 0; i < num_data; ++i) {
					ll += y_data_int[i] * location_par[i] - std::exp(location_par[i]);
				}
				ll -= log_normalizing_constant_;
			}
			else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static) reduction(+:ll)
				for (data_size_t i = 0; i < num_data; ++i) {
					ll += -aux_pars_[0] * (location_par[i] + y_data[i] * std::exp(-location_par[i]));
				}
				ll -= log_normalizing_constant_;
			}
			return(ll);
		}

		/*!
		* \brief Calculate the first derivative of the log-likelihood with respect to the location parameter
		* \param y_data Response variable data if response variable is continuous
		* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
		* \param location_par Location parameter (random plus fixed effects)
		* \param num_data Number of data points
		*/
		void CalcFirstDerivLogLik(const double* y_data, const int* y_data_int,
			const double* location_par, const data_size_t num_data) {
			if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
				for (data_size_t i = 0; i < num_data; ++i) {
					if (y_data_int[i] == 0) {
						first_deriv_ll_[i] = -normalPDF(location_par[i]) / (1 - normalCDF(location_par[i]));
					}
					else {
						first_deriv_ll_[i] = normalPDF(location_par[i]) / normalCDF(location_par[i]);
					}
				}
			}
			else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
				for (data_size_t i = 0; i < num_data; ++i) {
					first_deriv_ll_[i] = y_data_int[i] - CondMeanLikelihood(location_par[i]);//CondMeanLikelihood = logistic(x)
				}
			}
			else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
				for (data_size_t i = 0; i < num_data; ++i) {
					first_deriv_ll_[i] = y_data_int[i] - std::exp(location_par[i]);
				}
			}
			else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
				for (data_size_t i = 0; i < num_data; ++i) {
					first_deriv_ll_[i] = aux_pars_[0] * (y_data[i] * std::exp(-location_par[i]) - 1.);
				}
			}
		}
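		// Editor's note (illustration, not GPBoost code): the bernoulli_logit
		// branch of LogLikelihood above evaluates std::log(1 + std::exp(x)) with
		// x = location_par[i], which overflows for large positive x. A
		// numerically robust equivalent is the standard softplus rewrite
		//
		//   double softplus = (x > 0.) ? x + std::log1p(std::exp(-x))
		//                              : std::log1p(std::exp(x));
		//
		// which is algebraically identical to log(1 + exp(x)) but never
		// exponentiates a large positive argument.

		/*!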
* \brief Calculate the second derivative of the negative (!) log-likelihood with respect to the location parameter * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param location_par Location parameter (random plus fixed effects) * \param num_data Number of data points */ void CalcSecondDerivNegLogLik(const double* y_data, const int* y_data_int, const double* location_par, const data_size_t num_data) { if (likelihood_type_ == "bernoulli_probit") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { double dnorm = normalPDF(location_par[i]); double pnorm = normalCDF(location_par[i]); if (y_data_int[i] == 0) { double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm); second_deriv_neg_ll_[i] = -dnorm_frac_one_min_pnorm * (location_par[i] - dnorm_frac_one_min_pnorm); } else { double dnorm_frac_pnorm = dnorm / pnorm; second_deriv_neg_ll_[i] = dnorm_frac_pnorm * (location_par[i] + dnorm_frac_pnorm); } } } else if (likelihood_type_ == "bernoulli_logit") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { double exp_loc_i = std::exp(location_par[i]); second_deriv_neg_ll_[i] = exp_loc_i * std::pow(1. + exp_loc_i, -2); } } else if (likelihood_type_ == "poisson") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { second_deriv_neg_ll_[i] = std::exp(location_par[i]); } } else if (likelihood_type_ == "gamma") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { second_deriv_neg_ll_[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]); } } } /*! * \brief Calculate the third derivative of the log-likelihood with respect to the location parameter * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param location_par Location parameter (random plus fixed effects) * \param num_data Number of data points * \param[out] third_deriv Third derivative of the log-likelihood with respect to the location parameter. Need to pre-allocate memory of size num_data */ void CalcThirdDerivLogLik(const double* y_data, const int* y_data_int, const double* location_par, const data_size_t num_data, double* third_deriv) { if (likelihood_type_ == "bernoulli_probit") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { double dnorm = normalPDF(location_par[i]); double pnorm = normalCDF(location_par[i]); if (y_data_int[i] == 0) { double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm); third_deriv[i] = dnorm_frac_one_min_pnorm * (1 - location_par[i] * location_par[i] + dnorm_frac_one_min_pnorm * (3 * location_par[i] - 2 * dnorm_frac_one_min_pnorm)); } else { double dnorm_frac_pnorm = dnorm / pnorm; third_deriv[i] = dnorm_frac_pnorm * (location_par[i] * location_par[i] - 1 + dnorm_frac_pnorm * (3 * location_par[i] + 2 * dnorm_frac_pnorm)); } } } else if (likelihood_type_ == "bernoulli_logit") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { double exp_loc_i = std::exp(location_par[i]); third_deriv[i] = -exp_loc_i * (1. 
- exp_loc_i) * std::pow(1 + exp_loc_i, -3); } } else if (likelihood_type_ == "poisson") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { third_deriv[i] = -std::exp(location_par[i]); } } else if (likelihood_type_ == "gamma") { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { third_deriv[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]); } } } /*! * \brief Calculate the mean of the likelihood conditional on the (predicted) latent variable * Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable */ inline double CondMeanLikelihood(const double value) const { if (likelihood_type_ == "gaussian") { return value; } else if (likelihood_type_ == "bernoulli_probit") { return normalCDF(value); } else if (likelihood_type_ == "bernoulli_logit") { return 1. / (1. + std::exp(-value)); } else if (likelihood_type_ == "poisson") { return std::exp(value); } else if (likelihood_type_ == "gamma") { return std::exp(value); } else { Log::REFatal("CondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str()); return 0.; } } /*! * \brief Calculate the first derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable * Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable */ inline double FirstDerivLogCondMeanLikelihood(const double value) const { if (likelihood_type_ == "bernoulli_logit") { return 1. / (1. + std::exp(value)); } else if (likelihood_type_ == "poisson") { return 1.; } else if (likelihood_type_ == "gamma") { return 1.; } else { Log::REFatal("FirstDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str()); return 0.; } } /*! * \brief Calculate the second derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable * Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable */ inline double SecondDerivLogCondMeanLikelihood(const double value) const { if (likelihood_type_ == "bernoulli_logit") { double exp_x = std::exp(value); return -exp_x / ((1. + exp_x) * (1. + exp_x)); } else if (likelihood_type_ == "poisson") { return 0.; } else if (likelihood_type_ == "gamma") { return 0.; } else { Log::REFatal("SecondDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str()); return 0.; } } /*! * \brief Do Cholesky decomposition * \param[out] chol_fact Cholesky factor * \param psi Matrix for which the Cholesky decomposition should be done */ template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type * = nullptr > void CalcChol(T_chol& chol_fact, const T_mat_1& psi) { if (!chol_fact_pattern_analyzed_) { chol_fact.analyzePattern(psi); chol_fact_pattern_analyzed_ = true; } chol_fact.factorize(psi); } template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type * = nullptr > void CalcChol(T_chol& chol_fact, const T_mat_1& psi) { chol_fact.compute(psi); } /*! 
		* \brief Apply permutation matrix of Cholesky factor (if it exists)
		* \param chol_fact Cholesky factor
		* \param[out] M Matrix to which the permutation is applied
		*/
		template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type * = nullptr >
		void ApplyPermutationCholeskyFactor(const T_chol& chol_fact, T_mat_1& M) {
			if (chol_fact.permutationP().size() > 0) {//Apply permutation if an ordering is used
				M = chol_fact.permutationP() * M;
			}
		}
		template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type * = nullptr >
		void ApplyPermutationCholeskyFactor(const T_chol&, T_mat_1&) {
		}

		/*!
		* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood.
		*	Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
		*	In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
		*	This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
		* \param y_data Response variable data if response variable is continuous
		* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
		* \param fixed_effects Fixed effects component of location parameter
		* \param num_data Number of data points
		* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
		* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
		*/
		void FindModePostRandEffCalcMLLStable(const double* y_data, const int* y_data_int,
			const double* fixed_effects, const data_size_t num_data,
			const std::shared_ptr<T_mat> ZSigmaZt, double& approx_marginal_ll) {
			// Initialize variables
			if (!mode_initialized_) {
				InitializeModeAvec();
			}
			else {
				mode_previous_value_ = mode_;
			}
			bool no_fixed_effects = (fixed_effects == nullptr);
			vec_t location_par;
			// Initialize objective function (LA approx.
marginal likelihood) for use as convergence criterion if (no_fixed_effects) { approx_marginal_ll = LogLikelihood(y_data, y_data_int, mode_.data(), num_data); } else { location_par = vec_t(num_data); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data); } double approx_marginal_ll_new; vec_t rhs, v_aux;//auxiliary variables sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood) Wsqrt.setIdentity(); T_mat Id(num_data, num_data); Id.setIdentity(); T_mat Id_plus_Wsqrt_ZSigmaZt_Wsqrt; // Start finding mode int it; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood if (no_fixed_effects) { CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data); } else { CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); } // Calculate Cholesky factor of matrix B = Id + Wsqrt * Z*Sigma*Zt * Wsqrt Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt(); Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt; CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt); // Update mode and a_vec_ rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array(); v_aux = Wsqrt * (*ZSigmaZt) * rhs; a_vec_ = rhs - Wsqrt * (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux)); mode_ = (*ZSigmaZt) * a_vec_; // Calculate new objective function if (no_fixed_effects) { approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data); } else { // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); } if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) { approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } } if (it == MAXIT_MODE_NEWTON_) { Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations"); } if (no_fixed_effects) { CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data); } else { CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); } Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt(); Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt; CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt); approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum(); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLStable"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //Log::REInfo("a"); //for (int i = 0; i < 5; ++i) { // Log::REInfo("a[%d]: %g", i, a_vec_[i]); //} }//end FindModePostRandEffCalcMLLStable /*! * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood. * Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using * a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt). 
* This version is used for the Laplace approximation when there is only one Gaussian process and * there are many repeated observations at the same location, i.e., the dimension of the random effects b is much smaller than Zb * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode */ void FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> Sigma, const data_size_t * const random_effects_indices_of_data, double& approx_marginal_ll) { //std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging //std::chrono::steady_clock::time_point begin, end;// only for debugging //double el_time; // Initialize variables if (!mode_initialized_) { InitializeModeAvec(); } else { mode_previous_value_ = mode_; } vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Initialize objective function (LA approx.
marginal likelihood) for use as convergence criterion approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data); double approx_marginal_ll_new; vec_t diag_sqrt_ZtWZ(num_re_);//sqrt of diagonal matrix ZtWZ T_mat Id(num_re_, num_re_); Id.setIdentity(); T_mat Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt; vec_t rhs, v_aux; int it; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); // Calculate right hand side for mode update diag_sqrt_ZtWZ.setZero(); #pragma omp parallel { vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel //Non-parallel version //for (data_size_t i = 0; i < num_data; ++i) { // diag_sqrt_ZtWZ[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; //} rhs = (diag_sqrt_ZtWZ.array() * mode_.array()).matrix();//rhs = ZtWZ * mode_ + Zt * first_deriv_ll_ for updating mode #pragma omp parallel { vec_t rhs_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { rhs[i_re] += rhs_private[i_re]; } }//end omp critical }//end omp parallel // Calculate Cholesky factor of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt(); Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal(); CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);//this is the bottleneck (for large data and sparse matrices) ////only for debugging //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt));//only for debugging //T_mat chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();//only for debugging //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_: number non zeros = %d", GetNumberNonZeros<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt));//only for debugging // Update mode and a_vec_ v_aux = (*Sigma) * rhs; v_aux.array() *= diag_sqrt_ZtWZ.array(); a_vec_ = -chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux); a_vec_.array() *= diag_sqrt_ZtWZ.array(); a_vec_.array() += rhs.array(); mode_ = (*Sigma) * a_vec_; // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Calculate new objective function approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) { approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } }//end loop for finding mode if (it == MAXIT_MODE_NEWTON_) { Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations"); } CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); diag_sqrt_ZtWZ.setZero(); #pragma omp parallel { vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt(); Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal(); CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt); approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum(); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //Log::REInfo("a"); //for (int i = 0; i < 5; ++i) { // Log::REInfo("a[%d]: %g", i, a_vec_[i]); //} //end = std::chrono::steady_clock::now();// only for debugging //el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: TOTAL TIME for mode calculation: %g", el_time);// Only for debugging }//end FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale /*! * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood. * Calculations are done by directly factorizing ("inverting) (Sigma^-1 + Zt*W*Z). * NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX * This version is used for the Laplace approximation when there are only grouped random effects. 
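* Editorial sketch (added for illustration; hypothetical standalone Eigen code with placeholder names 'W', 'grad', 'b' for the negative Hessian diagonal, the log-likelihood gradient, and the current mode): the Newton update implemented below is
*   sp_mat_t M = SigmaI + Zt * W.asDiagonal() * sp_mat_t(Zt.transpose()); // Sigma^-1 + Zt*W*Z
*   Eigen::SimplicialLLT<sp_mat_t> chol(M); // hypothetical factorization choice
*   b += chol.solve(Zt * grad - SigmaI * b);
* (the implementation additionally caches the symbolic factorization via analyzePattern).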
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix * \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode */ void FindModePostRandEffCalcMLLGroupedRE(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& SigmaI, const sp_mat_t& Zt, double& approx_marginal_ll) { // Initialize variables if (!mode_initialized_) { InitializeModeAvec(); } else { mode_previous_value_ = mode_; } sp_mat_t Z = Zt.transpose(); vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects if (fixed_effects != nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] += fixed_effects[i]; } } // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data); double approx_marginal_ll_new; sp_mat_t SigmaI_plus_ZtWZ; vec_t rhs; // Start finding mode int it; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); // Calculate Cholesky factor and update mode rhs = Zt * first_deriv_ll_ - SigmaI * mode_;//right hand side for updating mode SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z; SigmaI_plus_ZtWZ.makeCompressed(); if (!chol_fact_pattern_analyzed_) { chol_fact_SigmaI_plus_ZtWZ_grouped_.analyzePattern(SigmaI_plus_ZtWZ); chol_fact_pattern_analyzed_ = true; } chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ); mode_ += chol_fact_SigmaI_plus_ZtWZ_grouped_.solve(rhs); // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) location_par = Z * mode_; if (fixed_effects != nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] += fixed_effects[i]; } } // Calculate new objective function approx_marginal_ll_new = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) { approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } }//end mode finding algorithm if (it == MAXIT_MODE_NEWTON_) { Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations"); } CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z; SigmaI_plus_ZtWZ.makeCompressed(); chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ); approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL()).diagonal().array().log().sum() + 0.5 * SigmaI.diagonal().array().log().sum(); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLGroupedRE"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //double approx_marginal_ll_1 = -0.5 * (mode_.dot(SigmaI * mode_)); //double approx_marginal_ll_2 = LogLikelihood(y_data, y_data_int, location_par.data(), num_data); //double approx_marginal_ll_3 = 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() - 0.5 * SigmaI.diagonal().array().log().sum(); //Log::REInfo("approx_marginal_ll_1: %g", approx_marginal_ll_1); //Log::REInfo("approx_marginal_ll_2: %g", approx_marginal_ll_2); //Log::REInfo("approx_marginal_ll_3: %g", approx_marginal_ll_3); //std::this_thread::sleep_for(std::chrono::milliseconds(200)); }//end FindModePostRandEffCalcMLLGroupedRE /*! * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood. * Calculations are done by directly factorizing ("inverting) (Sigma^-1 + Zt*W*Z). * This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable. 
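* Editorial sketch (added for illustration; 're_idx', 'grad', 'neg_hess', 'b' are placeholder names): since Sigma = sigma2 * Id here, each Newton step reduces to elementwise operations,
*   vec_t rhs = -b / sigma2; vec_t diag = vec_t::Constant(num_re, 1. / sigma2);
*   for (data_size_t i = 0; i < num_data; ++i) { rhs[re_idx[i]] += grad[i]; diag[re_idx[i]] += neg_hess[i]; }
*   b.array() += rhs.array() / diag.array();
* so no matrix factorization is required (the loops below additionally parallelize the scatter sums).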
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param sigma2 Variance of random effects * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode */ void FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const double sigma2, const data_size_t* const random_effects_indices_of_data, double& approx_marginal_ll) { // Initialize variables if (!mode_initialized_) { InitializeModeAvec(); } else { mode_previous_value_ = mode_; } vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data); double approx_marginal_ll_new; vec_t rhs; diag_SigmaI_plus_ZtWZ_ = vec_t(num_re_); // Start finding mode int it; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); // Calculate rhs for mode update rhs = - mode_ / sigma2;//right hand side for updating mode #pragma omp parallel { vec_t rhs_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { rhs[i_re] += rhs_private[i_re]; } }//end omp critical }//end omp parallel // Update mode diag_SigmaI_plus_ZtWZ_.setZero(); #pragma omp parallel { vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2; mode_ += (rhs.array() / diag_SigmaI_plus_ZtWZ_.array()).matrix(); // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Calculate new objective function approx_marginal_ll_new = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) { approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } }//end mode finding algorithm if (it == MAXIT_MODE_NEWTON_) { Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations"); } CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); diag_SigmaI_plus_ZtWZ_.setZero(); #pragma omp parallel { vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2; approx_marginal_ll -= 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() + 0.5 * num_re_ * std::log(sigma2); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //std::this_thread::sleep_for(std::chrono::milliseconds(200)); }//end FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale /*! * \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood. * Calculations are done by factorizing ("inverting) (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor * of Sigma^-1 has previously been calculated using a Vecchia approximation. * This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used. 
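* Editorial sketch (added for illustration; 'W', 'grad', 'b' are placeholder names): the Newton step implemented below solves, with the Vecchia precision Sigma^-1 = B^T * D^-1 * B,
*   sp_mat_t SigmaI = B.transpose() * D_inv * B;
*   sp_mat_t M = SigmaI; M.diagonal().array() += W.array(); // M = Sigma^-1 + W
*   Eigen::SimplicialLLT<sp_mat_t> chol(M); // hypothetical factorization choice
*   b = chol.solve((W.array() * b.array() + grad.array()).matrix());
* where factorizing M is the computational bottleneck for large data.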
* Caveat: Sigma^-1 + W can be not very sparse * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor) * \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B * \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode */ void FindModePostRandEffCalcMLLVecchia(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& B, const sp_mat_t& D_inv, double& approx_marginal_ll) { // Initialize variables if (!mode_initialized_) { InitializeModeAvec(); } else { mode_previous_value_ = mode_; } bool no_fixed_effects = (fixed_effects == nullptr); sp_mat_t SigmaI = B.transpose() * D_inv * B; vec_t location_par;//location parameter = mode of random effects + fixed effects // Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion if (no_fixed_effects) { approx_marginal_ll = LogLikelihood(y_data, y_data_int, mode_.data(), num_data); } else { location_par = vec_t(num_data); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data); } double approx_marginal_ll_new; sp_mat_t SigmaI_plus_W; vec_t rhs, B_mode; // Start finding mode int it; for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) { // Calculate first and second derivative of log-likelihood if (no_fixed_effects) { CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data); } else { CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data); CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); } // Calculate Cholesky factor and update mode rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();//right hand side for updating mode SigmaI_plus_W = SigmaI; SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array(); SigmaI_plus_W.makeCompressed(); //Calculation of the Cholesky factor is the bottleneck if (!chol_fact_pattern_analyzed_) { chol_fact_SigmaI_plus_ZtWZ_vecchia_.analyzePattern(SigmaI_plus_W); chol_fact_pattern_analyzed_ = true; } chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W);//This is the bottleneck for large data //Log::REInfo("SigmaI_plus_W: number non zeros = %d", (int)SigmaI_plus_W.nonZeros());//only for debugging //Log::REInfo("chol_fact_SigmaI_plus_ZtWZ: Number non zeros = %d", (int)((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).nonZeros());//only for debugging mode_ = chol_fact_SigmaI_plus_ZtWZ_vecchia_.solve(rhs); // Calculate new objective function B_mode = B * mode_; if (no_fixed_effects) { approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data); } else { // Update location parameter of log-likelihood for calculation of approx. 
marginal log-likelihood (objective function) #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data); } if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) { approx_marginal_ll = approx_marginal_ll_new; break; } else { approx_marginal_ll = approx_marginal_ll_new; } } // end loop for mode finding if (it == MAXIT_MODE_NEWTON_) { Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations"); } if (no_fixed_effects) { CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data); } else { CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data); } SigmaI_plus_W = SigmaI; SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array(); SigmaI_plus_W.makeCompressed(); chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W); approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).diagonal().array().log().sum() + 0.5 * D_inv.diagonal().array().log().sum(); mode_has_been_calculated_ = true; ////Only for debugging //Log::REInfo("FindModePostRandEffCalcMLLVecchia"); //Log::REInfo("Number of iterations: %d", it); //Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll); //Log::REInfo("Mode"); //for (int i = 0; i < 10; ++i) { // Log::REInfo("mode_[%d]: %g", i, mode_[i]); //} //std::this_thread::sleep_for(std::chrono::milliseconds(200)); }//end FindModePostRandEffCalcMLLVecchia /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt). * In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id". * This version is used for the Laplace approximation when dense matrices are used (e.g. GP models). * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory in order to avoid passing a large collection of gardient covariance matrices in memory//TODO: better way than passing this? 
(relying on all gradients in a vector can lead to large memory consumption) * \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated * \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxStable(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> ZSigmaZt, const std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables bool no_fixed_effects = (fixed_effects == nullptr); vec_t location_par;//location parameter = mode of random effects + fixed effects T_mat L_inv_Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood) L_inv_Wsqrt.setIdentity(); L_inv_Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt(); vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood if (no_fixed_effects) { CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data()); } else { location_par = vec_t(num_data); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); } ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_Wsqrt); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_Wsqrt);//L_inv_Wsqrt = L\Wsqrt T_mat L_inv_Wsqrt_ZSigmaZt = L_inv_Wsqrt * (*ZSigmaZt); // calculate gradient wrt covariance parameters if (calc_cov_grad) { T_mat WI_plus_Sigma_inv = L_inv_Wsqrt.transpose() * L_inv_Wsqrt;//WI_plus_Sigma_inv = Wsqrt * L^T\(L\Wsqrt) = (W^-1 + Sigma)^-1 // calculate gradient of approx. 
marginal log-likelihood wrt the mode // note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - L_inv_Wsqrt_ZSigmaZt^T*L_inv_Wsqrt_ZSigmaZt and (ii) "Z=Id" vec_t d_mll_d_mode = (-0.5 * ((*ZSigmaZt).diagonal() - ((T_mat)(L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt)).diagonal()).array() * third_deriv.array()).matrix(); vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter vec_t v_aux;//auxiliary variable for calculating d_mode_d_par int par_count = 0; double explicit_derivative; for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) { for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) { std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.); // calculate explicit derivative of approx. marginal log-likelihood explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) + 0.5 * (WI_plus_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum(); // calculate implicit derivative (through mode) of approx. marginal log-likelihood v_aux = (*SigmaDeriv) * first_deriv_ll_; d_mode_d_par = (v_aux.array() - ((*ZSigmaZt) * WI_plus_Sigma_inv * v_aux).array()).matrix(); cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par); par_count++; } } ////Only for debugging //Log::REInfo("explicit_derivative: %g", explicit_derivative); //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]); //} //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]); //} //Log::REInfo("cov_grad"); //for (int i = 0; i < par_count; ++i) { // Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]); //} }//end calc_cov_grad // calculate gradient wrt fixed effects if (calc_F_grad) { T_mat L_inv_Wsqrt_ZSigmaZt_sqr = L_inv_Wsqrt_ZSigmaZt.cwiseProduct(L_inv_Wsqrt_ZSigmaZt); vec_t ZSigmaZtI_plus_W_inv_diag = (*ZSigmaZt).diagonal() - L_inv_Wsqrt_ZSigmaZt_sqr.transpose() * vec_t::Ones(L_inv_Wsqrt_ZSigmaZt_sqr.rows());// diagonal of (ZSigmaZt^-1 + W) ^ -1 vec_t d_mll_d_mode = (-0.5 * ZSigmaZtI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here vec_t L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode = L_inv_Wsqrt_ZSigmaZt * d_mll_d_mode;// for implicit derivative vec_t ZSigmaZtI_plus_W_inv_d_mll_d_mode = (*ZSigmaZt) * d_mll_d_mode - L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode; vec_t d_mll_d_F_implicit = (ZSigmaZtI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode - d_mll_d_F_implicit; }//end calc_F_grad }//end CalcGradNegMargLikelihoodLAApproxStable /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using * a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
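* Editorial note (added for clarity): as in the version above, for every covariance parameter theta the returned gradient combines an explicit and an implicit (through the mode) part,
*   d(-mll)/d(theta) = explicit_derivative + d_mll_d_mode^T * d_mode_d_par,
* where, in a hypothetical one-parameter sketch with placeholder names,
*   vec_t v = Sigma_deriv * Zt_first_deriv;
*   vec_t d_mode_d_par = v - Sigma_mat * (ZtWZI_Sigma_inv_mat * v);
*   double cov_grad_theta = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);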
* This version is used for the Laplace approximation when there is only one Gaussian process and * there are many repeated observations at the same location, i.e., the dimension of the random effects b is much smaller than Zb * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory in order to avoid passing a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption) * \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated * \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated, otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> Sigma, const data_size_t* const random_effects_indices_of_data, const std::vector<std::shared_ptr<RECompBase<T_mat>>> & re_comps_cluster_i, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t & fixed_effect_grad, bool calc_mode = false) { //std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging //std::chrono::steady_clock::time_point begin, end;// only for debugging //double el_time; CHECK(re_comps_cluster_i.size() == 1); if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here.
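// Editorial note (added for clarity): 'mll' is only a by-product here; callers that have not yet run the mode search pass calc_mode=true, e.g. (hypothetical usage with placeholder names):
//   CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(y, nullptr, nullptr, n,
//       Sigma, re_idx, re_comps, true, false, cov_grad.data(), f_grad, true);
// whereas callers that already called FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale reuse the cached mode_ and a_vec_ by passing calc_mode=false.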
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, Sigma, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } // Matrix ZtWZsqrt vec_t diag_ZtWZ = vec_t::Zero(num_re_); #pragma omp parallel { vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel T_mat L_inv_ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ L_inv_ZtWZsqrt.setIdentity(); L_inv_ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt(); vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); vec_t diag_ZtThirdDerivZ(num_re_);//diagonal of Zt * diag(third_deriv) * Z diag_ZtThirdDerivZ.setZero(); #pragma omp parallel { vec_t diag_ZtThirdDerivZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_ZtThirdDerivZ_private[random_effects_indices_of_data[i]] += third_deriv[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtThirdDerivZ[i_re] += diag_ZtThirdDerivZ_private[i_re]; } }//end omp critical }//end omp parallel ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_ZtWZsqrt); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_ZtWZsqrt);//L_inv_ZtWZsqrt = L\ZtWZsqrt //This is the bottleneck (in this first part) for large data when using sparse matrices T_mat L_inv_ZtWZsqrt_Sigma = L_inv_ZtWZsqrt * (*Sigma); ////Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt));//Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt_Sigma: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt_Sigma));//Only for debugging // calculate gradient wrt covariance parameters if (calc_cov_grad) { vec_t ZtFirstDeriv(num_re_);//vector Zt * first_deriv_ll_ ZtFirstDeriv.setZero(); #pragma omp parallel { vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re]; } }//end omp critical }//end omp parallel T_mat ZtWZI_Sigma_inv = L_inv_ZtWZsqrt.transpose() * L_inv_ZtWZsqrt;//ZtWZI_Sigma_inv = ZtWZsqrt * L^T\(L\ZtWZsqrt) = ((ZtWZ)^-1 + Sigma)^-1 // calculate gradient of approx.
marginal log-likelihood wrt the mode // note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = Sigma - L_inv_ZtWZsqrt_Sigma^T*L_inv_ZtWZsqrt_Sigma vec_t d_mll_d_mode = (-0.5 * ((*Sigma).diagonal() - ((T_mat)(L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma)).diagonal()).array() * diag_ZtThirdDerivZ.array()).matrix(); vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter vec_t v_aux;//auxiliary variable for calculating d_mode_d_par int par_count = 0; double explicit_derivative; for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) { for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) { std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.); // calculate explicit derivative of approx. marginal log-likelihood explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) + 0.5 * (ZtWZI_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum(); // calculate implicit derivative (through mode) of approx. marginal log-likelihood v_aux = (*SigmaDeriv) * ZtFirstDeriv; d_mode_d_par = (v_aux.array() - ((*Sigma) * ZtWZI_Sigma_inv * v_aux).array()).matrix(); cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par); par_count++; } } ////Only for debugging //Log::REInfo("explicit_derivative: %g", explicit_derivative); //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]); //} //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]); //} //Log::REInfo("cov_grad"); //for (int i = 0; i < par_count; ++i) { // Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]); //} }//end calc_cov_grad // calculate gradient wrt fixed effects if (calc_F_grad) { T_mat L_inv_ZtWZsqrt_Sigma_sqr = L_inv_ZtWZsqrt_Sigma.cwiseProduct(L_inv_ZtWZsqrt_Sigma); vec_t SigmaI_plus_ZtWZ_inv_diag = (*Sigma).diagonal() - L_inv_ZtWZsqrt_Sigma_sqr.transpose() * vec_t::Ones(L_inv_ZtWZsqrt_Sigma_sqr.rows());// diagonal of (Sigma^-1 + ZtWZ) ^ -1 vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_ZtWZ_inv_diag.array() * diag_ZtThirdDerivZ.array()).matrix();// gradient of approx. marginal likelihood wrt the mode vec_t L_inv_ZtWZsqrt_Sigma_d_mll_d_mode = L_inv_ZtWZsqrt_Sigma * d_mll_d_mode;// for implicit derivative vec_t SigmaI_plus_ZtWZ_inv_d_mll_d_mode = (*Sigma) * d_mll_d_mode - L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma_d_mll_d_mode; fixed_effect_grad = -first_deriv_ll_; #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { fixed_effect_grad[i] += -0.5 * third_deriv[i] * SigmaI_plus_ZtWZ_inv_diag[random_effects_indices_of_data[i]] - second_deriv_neg_ll_[i] * SigmaI_plus_ZtWZ_inv_d_mll_d_mode[random_effects_indices_of_data[i]]; } }//end calc_F_grad //end = std::chrono::steady_clock::now();// only for debugging //el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: TOTAL TIME: %g", el_time);// Only for debugging }//end CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
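* Editorial note (added for clarity): for the variance parameter of component j the gradient assembled below has the schematic form (placeholder notation, P = (Sigma^-1 + Zt*W*Z)^-1)
*   explicit part = -0.5 * mode^T * SigmaI * I_j * mode + 0.5 * trace(P * I_j * Zt*W*Z),
*   implicit part = d_mll_d_mode^T * (P * I_j * Zt * first_deriv_ll_),
* where I_j is a diagonal selector matrix that is 1 for the random effects of component j and 0 otherwise.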
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX * This version is used for the Laplace approximation when there are only grouped random effects. * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix * \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods * \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated * \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxGroupedRE(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& SigmaI, const sp_mat_t& Zt, std::vector<data_size_t> cum_num_rand_eff_cluster_i, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) { int num_REs = (int)SigmaI.cols();//number of random effect realizations int num_comps = (int)cum_num_rand_eff_cluster_i.size() - 1;//number of different random effect components if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables sp_mat_t Z = Zt.transpose(); vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects if (fixed_effects != nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] += fixed_effects[i]; } } vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); // Calculate (Sigma^-1 + Zt*W*Z)^-1 sp_mat_t L_inv(num_REs, num_REs); L_inv.setIdentity(); if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering L_inv = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * L_inv; } chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(L_inv); sp_mat_t SigmaI_plus_ZtWZ_inv = L_inv.transpose() * L_inv; // calculate gradient of approx. 
marginal likelihood wrt the mode //Note: the calculation of d_mll_d_mode is the bottleneck of this function (corresponding lines below are indicated with * and, in particular, **) vec_t d_mll_d_mode(num_REs); sp_mat_t Zt_third_deriv = Zt * third_deriv.asDiagonal();//every column of Z multiplied elementwise by third_deriv #pragma omp parallel for schedule(static) for (int i = 0; i < num_REs; ++i) { vec_t diag_d_W_d_mode_i = Zt_third_deriv.row(i);//*can be slow //calculate Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z sp_mat_t Zt_d_W_d_mode_i_Z = (Zt * diag_d_W_d_mode_i.asDiagonal() * Z).pruned();//**can be very slow. Note that this is also slow when the middle diagonal matrix is a pruned sparse matrix ////Variant 2: slower //sp_mat_t Zt_third_deriv_diag = sp_mat_t(((vec_t)Zt_third_deriv.row(i)).asDiagonal()); //sp_mat_t Zt_d_W_d_mode_i_Z = Zt * Zt_third_deriv_diag * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z ////Variant 3: slower //vec_t Z_i = Z.col(i);// column number i of Z //vec_t diag_d_W_d_mode_i = (Z_i.array() * third_deriv.array()).matrix();//diagonal of derivative of matrix W wrt random effect number i //sp_mat_t Zt_d_W_d_mode_i_Z = Zt * diag_d_W_d_mode_i.asDiagonal() * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z d_mll_d_mode[i] = -0.5 * (Zt_d_W_d_mode_i_Z.cwiseProduct(SigmaI_plus_ZtWZ_inv)).sum(); } // calculate gradient wrt covariance parameters if (calc_cov_grad) { sp_mat_t ZtWZ = Zt * second_deriv_neg_ll_.asDiagonal() * Z; vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter vec_t v_aux;//auxiliary variable for calculating d_mode_d_par vec_t SigmaI_mode = SigmaI * mode_; double explicit_derivative; sp_mat_t I_j(num_REs, num_REs);//Diagonal matrix with 1 on the diagonal for all random effects of component j and 0's otherwise sp_mat_t I_j_ZtWZ; for (int j = 0; j < num_comps; ++j) { // calculate explicit derivative of approx. marginal log-likelihood std::vector<Triplet_t> triplets;//for constructing I_j triplets.reserve(cum_num_rand_eff_cluster_i[j + 1] - cum_num_rand_eff_cluster_i[j]); explicit_derivative = 0.; for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) { triplets.emplace_back(i, i, 1.); explicit_derivative += SigmaI_mode[i] * mode_[i]; } // Alternative version using parallelization (not faster) //#pragma omp parallel // { // std::vector<Triplet_t> triplets_private; // //triplets_private.reserve(cum_num_rand_eff_cluster_i[num_comps]); //#pragma omp for nowait reduction(+:explicit_derivative) // for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) { // triplets_private.emplace_back(i, i, 1.); // explicit_derivative += SigmaI_mode[i] * mode_[i]; // } //#pragma omp critical // triplets.insert(triplets.end(), triplets_private.begin(), triplets_private.end()); // } //#pragma omp parallel for schedule(static) reduction(+:explicit_derivative) // for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) { // explicit_derivative += SigmaI_mode[i] * mode_[i]; // } explicit_derivative *= -0.5; I_j.setFromTriplets(triplets.begin(), triplets.end()); I_j_ZtWZ = I_j * ZtWZ; explicit_derivative += 0.5 * (SigmaI_plus_ZtWZ_inv.cwiseProduct(I_j_ZtWZ)).sum(); // calculate implicit derivative (through mode) of approx.
marginal log-likelihood d_mode_d_par = SigmaI_plus_ZtWZ_inv * I_j * Zt * first_deriv_ll_; cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par); } ////Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxGroupedRE"); //Log::REInfo("explicit_derivative: %g", explicit_derivative); //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]); //} //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]); //} //Log::REInfo("cov_grad"); //for (int i = 0; i < num_comps; ++i) { // Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]); //} }//end calc_cov_grad // calculate gradient wrt fixed effects if (calc_F_grad) { vec_t d_detmll_d_F(num_data); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data; ++i) { sp_mat_t zi_zit = Zt.col(i) * Z.row(i);//=Z.row(i) * (Z.row(i)).transpose() d_detmll_d_F[i] = -0.5 * third_deriv[i] * (SigmaI_plus_ZtWZ_inv.cwiseProduct(zi_zit)).sum(); } vec_t d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W = d_mll_d_mode.transpose() * SigmaI_plus_ZtWZ_inv * Zt * second_deriv_neg_ll_.asDiagonal(); fixed_effect_grad = -first_deriv_ll_ + d_detmll_d_F - d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W; }//end calc_F_grad }//end CalcGradNegMargLikelihoodLAApproxGroupedRE /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z). * This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable. * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param sigma2 Variance of random effects * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated * \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated, otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const double sigma2, const data_size_t* const random_effects_indices_of_data, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here.
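// Editorial note (added for clarity): with a single grouping variable all matrices involved are diagonal; a hypothetical sketch of the covariance gradient computed below (placeholder names, diag = 1/sigma2 + diag(Zt*W*Z), zt_grad = Zt * first_deriv_ll_, ztwz = diag(Zt*W*Z), b = mode):
//   double expl = -0.5 * b.squaredNorm() / sigma2 + 0.5 * (ztwz.array() / diag.array()).sum();
//   vec_t d_mode = (zt_grad.array() / diag.array()).matrix();
//   cov_grad[0] = expl + d_mll_d_mode.dot(d_mode);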
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, sigma2, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects if (fixed_effects == nullptr) { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]]; } } else { #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i]; } } vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); // calculate gradient of approx. marginal likelihood wrt the mode vec_t d_mll_d_mode = vec_t::Zero(num_re_); #pragma omp parallel { vec_t third_deriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { third_deriv_private[random_effects_indices_of_data[i]] += third_deriv[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { d_mll_d_mode[i_re] += third_deriv_private[i_re]; } }//end omp critical }//end omp parallel d_mll_d_mode.array() /= -2. * diag_SigmaI_plus_ZtWZ_.array(); // calculate gradient wrt covariance parameters if (calc_cov_grad) { vec_t diag_ZtWZ = vec_t::Zero(num_re_); #pragma omp parallel { vec_t diag_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtWZ[i_re] += diag_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel double explicit_derivative = -0.5 * (mode_.array() * mode_.array()).sum() / sigma2 + 0.5 * (diag_ZtWZ.array() / diag_SigmaI_plus_ZtWZ_.array()).sum(); // calculate implicit derivative (through mode) of approx.
marginal log-likelihood vec_t d_mode_d_par = vec_t::Zero(num_re_); #pragma omp parallel { vec_t first_deriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { first_deriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { d_mode_d_par[i_re] += first_deriv_private[i_re]; } }//end omp critical }//end omp parallel d_mode_d_par.array() /= diag_SigmaI_plus_ZtWZ_.array(); cov_grad[0] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par); ////Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale"); //Log::REInfo("explicit_derivative: %g", explicit_derivative); //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]); //} //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]); //} //Log::REInfo("cov_grad[0]: %g", cov_grad[0]); }//end calc_cov_grad // calculate gradient wrt fixed effects if (calc_F_grad) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data; ++i) { fixed_effect_grad[i] = -first_deriv_ll_[i] - 0.5 * third_deriv[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]] - //=d_detmll_d_F d_mll_d_mode[random_effects_indices_of_data[i]] * second_deriv_neg_ll_[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]];//=implicit derivative = d_mll_d_mode * d_mode_d_F } ////Only for debugging //Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale"); //for (int i = 0; i < 5; ++i) { // Log::REInfo("fixed_effect_grad[%d]: %g", i, fixed_effect_grad[i]); //} }//end calc_F_grad }//end CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale /*! * \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients * Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor * of Sigma^-1 has previously been calculated using a Vecchia approximation. * This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
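* Editorial note (added for clarity): for each covariance parameter j, the implementation below assembles the derivative of the precision matrix Sigma^-1 = B^T * D^-1 * B with the product rule,
*   d(Sigma^-1)/dtheta_j = B_grad[j]^T * D^-1 * B + B^T * D^-1 * B_grad[j] - B^T * D^-1 * D_grad[j] * D^-1 * B,
* exploiting that the second summand is the transpose of the first (see SigmaI_deriv and Bt_Dinv_Bgrad below).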
* Caveat: Sigma^-1 + W can be not very sparse * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor) * \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B * \param B_grad Derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation * \param D_grad Derivatives of matrices D for Vecchia approximation * \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated * \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated * \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par) * \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying) * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void CalcGradNegMargLikelihoodLAApproxVecchia(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& B, const sp_mat_t& D_inv, const std::vector<sp_mat_t>& B_grad, const std::vector<sp_mat_t>& D_grad, bool calc_cov_grad, bool calc_F_grad, double* cov_grad, vec_t& fixed_effect_grad, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll); } else { CHECK(mode_has_been_calculated_); } // Initialize variables bool no_fixed_effects = (fixed_effects == nullptr); vec_t location_par;//location parameter = mode of random effects + fixed effects vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood if (no_fixed_effects) { CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data()); } else { location_par = vec_t(num_data); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data; ++i) { location_par[i] = mode_[i] + fixed_effects[i]; } CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data()); } // Calculate (Sigma^-1 + W)^-1 sp_mat_t L_inv(num_data, num_data); L_inv.setIdentity(); if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering L_inv = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * L_inv; } chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(L_inv); // calculate gradient wrt covariance parameters if (calc_cov_grad) { sp_mat_t SigmaI_plus_W_inv = L_inv.transpose() * L_inv;//Note: this is the computational bottleneck for large data vec_t d_mll_d_mode = -0.5 * (SigmaI_plus_W_inv.diagonal().array() * third_deriv.array()).matrix();// gradient of approx. 
marginal likelihood wrt the mode vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter double explicit_derivative; int num_par = (int)B_grad.size(); sp_mat_t SigmaI_deriv; sp_mat_t BgradT_Dinv_B; sp_mat_t Bt_Dinv_Bgrad; for (int j = 0; j < num_par; ++j) { SigmaI_deriv = B_grad[j].transpose() * D_inv * B; Bt_Dinv_Bgrad = SigmaI_deriv.transpose(); SigmaI_deriv += Bt_Dinv_Bgrad - B.transpose() * D_inv * D_grad[j] * D_inv * B; d_mode_d_par = -SigmaI_plus_W_inv * SigmaI_deriv * mode_; explicit_derivative = 0.5 * mode_.dot(SigmaI_deriv * mode_) + 0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum()); // Alternative version (not faster) //vec_t u = D_inv * B * mode_; //vec_t uk = B_grad[j] * mode_; //explicit_derivative = uk.dot(u) - 0.5 * u.dot(D_grad[j] * u) + // 0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum()); cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par); } ////Only for debugging //Log::REInfo("explicit_derivative: %g", explicit_derivative); //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]); //} //for (int i = 0; i < 5; ++i) { // Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]); //} //Log::REInfo("cov_grad"); //for (int i = 0; i < num_par; ++i) { // Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]); //} }//end calc_cov_grad // calculate gradient wrt fixed effects if (calc_F_grad) { sp_mat_t L_inv_sqr = L_inv.cwiseProduct(L_inv); vec_t SigmaI_plus_W_inv_diag = L_inv_sqr.transpose() * vec_t::Ones(L_inv_sqr.rows());// diagonal of (Sigma^-1 + W) ^ -1 vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here vec_t L_inv_d_mll_d_mode = L_inv * d_mll_d_mode;// for implicit derivative vec_t SigmaI_plus_W_inv_d_mll_d_mode = L_inv.transpose() * L_inv_d_mll_d_mode; vec_t d_mll_d_F_implicit = -(SigmaI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode + d_mll_d_F_implicit; }//end calc_F_grad }//end CalcGradNegMargLikelihoodLAApproxVecchia /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt). * In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id". * This version is used for the Laplace approximation when dense matrices are used (e.g. GP models). 
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxStable(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> ZSigmaZt, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll); } else { CHECK(mode_has_been_calculated_); } pred_mean = Cross_Cov * first_deriv_ll_; if (calc_pred_cov || calc_pred_var) { sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood) Wsqrt.setIdentity(); Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt(); T_mat Maux = Wsqrt * Cross_Cov.transpose(); ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux); if (calc_pred_cov) { pred_cov -= Maux.transpose() * Maux; } if (calc_pred_var) { Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] -= Maux.col(i).sum(); } } } ////Only for debugging //Log::REInfo("PredictLAApproxStable"); //for (int i = 0; i < 3; ++i) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]); //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxStable /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt). * In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id". * This version is used for the Laplace approximation when there is only one Gaussian process and calculations are done on the random effects scale. 
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t) * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const std::shared_ptr<T_mat> Sigma, const data_size_t* const random_effects_indices_of_data, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt) at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, Sigma, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } vec_t ZtFirstDeriv = vec_t::Zero(num_re_);//Zt * first_deriv_ll_ #pragma omp parallel { vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re]; } }//end omp critical }//end omp parallel pred_mean = Cross_Cov * ZtFirstDeriv; if (calc_pred_cov || calc_pred_var) { vec_t diag_ZtWZ = vec_t::Zero(num_re_); #pragma omp parallel { vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re]; } }//end omp critical }//end omp parallel sp_mat_t ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ ZtWZsqrt.setIdentity(); ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt(); T_mat Maux = ZtWZsqrt * Cross_Cov.transpose(); ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux); chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux);//Maux = L\(ZtWZsqrt * Cross_Cov^T) if (calc_pred_cov) { pred_cov -= Maux.transpose() * Maux; } if (calc_pred_var) { Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] -= Maux.col(i).sum(); } } } ////Only for debugging //Log::REInfo("PredictLAApproxOnlyOneGPCalculationsOnREScale"); //for (int i = 0; i 
< 3; ++i) { // if (Cross_Cov.rows() > 1) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); // } // else { // Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i)); // } //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]); //} //for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxOnlyOneGPCalculationsOnREScale /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z). * NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX * This version is used for the Laplace approximation when there are only grouped random effects. * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix * \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods * \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxGroupedRE(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& SigmaI, const sp_mat_t& Zt, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + Zt*W*Z at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. 
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll); } else { CHECK(mode_has_been_calculated_); } pred_mean = Cross_Cov * first_deriv_ll_; if (calc_pred_cov || calc_pred_var) { // calculate Maux = L\(Z^T * second_deriv_neg_ll_.asDiagonal() * Cross_Cov^T) T_mat Maux = Zt * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose(); if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering Maux = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * Maux; } chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(Maux); if (calc_pred_cov) { pred_cov += Maux.transpose() * Maux - (T_mat)(Cross_Cov * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose()); } if (calc_pred_var) { T_mat Maux3 = Cross_Cov.cwiseProduct(Cross_Cov * second_deriv_neg_ll_.asDiagonal()); Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] += Maux.col(i).sum() - Maux3.row(i).sum(); } } } ////Only for debugging //Log::REInfo("PredictLAApproxGroupedRE"); //for (int i = 0; i < 3; ++i) { // if (Cross_Cov.rows() > 1) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); // } // else { // Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i)); // } //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]); //} //for (int i = 0; i < std::min((int)pred_mean.size(), 3); ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxGroupedRE /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z). * This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable. 
* \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param sigma2 Variance of random effects * \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related * \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const double sigma2, const data_size_t* const random_effects_indices_of_data, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and diagonal of Sigma^-1 + Zt*W*Z at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data, sigma2, random_effects_indices_of_data, mll); } else { CHECK(mode_has_been_calculated_); } vec_t ZtFirstDeriv = vec_t::Zero(num_re_);//Zt * first_deriv_ll_ #pragma omp parallel { vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_); #pragma omp for for (data_size_t i = 0; i < num_data; ++i) { ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_re_; ++i_re) { ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re]; } }//end omp critical }//end omp parallel pred_mean = Cross_Cov * ZtFirstDeriv; vec_t diag_Sigma_plus_ZtWZI = vec_t(num_re_); diag_Sigma_plus_ZtWZI.array() = 1. 
/ diag_SigmaI_plus_ZtWZ_.array(); diag_Sigma_plus_ZtWZI.array() /= sigma2; diag_Sigma_plus_ZtWZI.array() -= 1.; diag_Sigma_plus_ZtWZI.array() /= sigma2; if (calc_pred_cov) { T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal() * Cross_Cov.transpose(); pred_cov += Maux; } if (calc_pred_var) { T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal(); T_mat Maux2 = Cross_Cov.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] += Maux2.row(i).sum(); } } ////Only for debugging //Log::REInfo("PredictLAApproxOnlyOneGroupedRECalculationsOnREScale"); //for (int i = 0; i < 3; ++i) { // if (Cross_Cov.rows() > 1) { // Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i)); // } // else { // Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i)); // } //} //for (int i = 0; i < 3; ++i) { // Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]); //} //for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) { // Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]); //} //if (calc_pred_var) { // for (int i = 0; i < 3; ++i) { // Log::REInfo("pred_var[%d]: %g", i, pred_var[i]); // } //} }//end PredictLAApproxOnlyOneGroupedRECalculationsOnREScale /*! * \brief Make predictions for the (latent) random effects when using the Laplace approximation. * Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor * of Sigma^-1 has previously been calculated using a Vecchia approximation. * This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used. * Caveat: Sigma^-1 + W may not be very sparse * \param y_data Response variable data if response variable is continuous * \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used) * \param fixed_effects Fixed effects component of location parameter * \param num_data Number of data points * \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor) * \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B * \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)") * \param pred_mean[out] Predicted mean * \param pred_cov[out] Predicted covariance matrix * \param pred_var[out] Predicted variances * \param calc_pred_cov If true, predictive covariance matrix is also calculated * \param calc_pred_var If true, predictive variances are also calculated * \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false) */ void PredictLAApproxVecchia(const double* y_data, const int* y_data_int, const double* fixed_effects, const data_size_t num_data, const sp_mat_t& B, const sp_mat_t& D_inv, const T_mat& Cross_Cov, vec_t& pred_mean, T_mat& pred_cov, vec_t& pred_var, bool calc_pred_cov = false, bool calc_pred_var = false, bool calc_mode = false) { if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode double mll;//approximate marginal likelihood. This is a by-product that is not used here. 
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll); } else { CHECK(mode_has_been_calculated_); } pred_mean = Cross_Cov * first_deriv_ll_; if (calc_pred_cov || calc_pred_var) { T_mat SigmaI_CrossCovT = B.transpose() * D_inv * B * Cross_Cov.transpose(); T_mat Maux = SigmaI_CrossCovT; //Maux = L\(Sigma^-1 * Cross_Cov^T), L = Chol(Sigma^-1 + W) if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering Maux = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * Maux; } chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(Maux); if (calc_pred_cov) { pred_cov += -Cross_Cov * SigmaI_CrossCovT + Maux.transpose() * Maux; } if (calc_pred_var) { Maux = Maux.cwiseProduct(Maux); #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] += Maux.col(i).sum() - (Cross_Cov.row(i)).dot(SigmaI_CrossCovT.col(i)); } } } }//end PredictLAApproxVecchia /*! * \brief Make predictions for the response variable (label) based on predictions for the mean and variance of the latent random effects * \param pred_mean[out] Predicted mean of latent random effects. The predicted mean for the response variables is written to this * \param pred_var[out] Predicted variances of latent random effects. The predicted variance for the response variables is written to this * \param predict_var If true, predictive variances are also calculated */ void PredictResponse(vec_t& pred_mean, vec_t& pred_var, bool predict_var = false) { if (likelihood_type_ == "bernoulli_probit") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_mean[i] = normalCDF(pred_mean[i] / std::sqrt(1. + pred_var[i])); } if (predict_var) { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] = pred_mean[i] * (1. - pred_mean[i]); } } } else if (likelihood_type_ == "bernoulli_logit") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_mean[i] = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]); } if (predict_var) { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { pred_var[i] = pred_mean[i] * (1. - pred_mean[i]); } } } else if (likelihood_type_ == "poisson") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]); if (predict_var) { double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]); pred_var[i] = psm - pm * pm + pm; } pred_mean[i] = pm; } } else if (likelihood_type_ == "gamma") { #pragma omp parallel for schedule(static) for (int i = 0; i < (int)pred_mean.size(); ++i) { double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]); if (predict_var) { double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]); pred_var[i] = psm - pm * pm + psm / aux_pars_[0]; } pred_mean[i] = pm; } } } /*! * \brief Adaptive GH quadrature to calculate predictive mean of response variable * \param latent_mean Predicted mean of the latent random effect * \param latent_var Predicted variance of the latent random effect */ double RespMeanAdaptiveGHQuadrature(const double latent_mean, const double latent_var) { // Find mode of integrand double mode_integrand, mode_integrand_last, update; mode_integrand = 0.; double sigma2_inv = 1. 
/ latent_var; double sqrt_sigma2_inv = std::sqrt(sigma2_inv); for (int it = 0; it < 100; ++it) { mode_integrand_last = mode_integrand; update = (FirstDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv * (mode_integrand - latent_mean)) / (SecondDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv); mode_integrand -= update; if (std::abs(update) / std::abs(mode_integrand_last) < DELTA_REL_CONV_) { break; } } // Adaptive GH quadrature double sqrt2_sigma_hat = M_SQRT2 / std::sqrt(-SecondDerivLogCondMeanLikelihood(mode_integrand) + sigma2_inv); double x_val; double mean_resp = 0.; for (int j = 0; j < order_GH_; ++j) { x_val = sqrt2_sigma_hat * GH_nodes_[j] + mode_integrand; mean_resp += adaptive_GH_weights_[j] * CondMeanLikelihood(x_val) * normalPDF(sqrt_sigma2_inv * (x_val - latent_mean)); } mean_resp *= sqrt2_sigma_hat * sqrt_sigma2_inv; return mean_resp; ////non-adaptive GH quadrature //double mean_resp = 0.; //double sigma = std::sqrt(latent_var); //for (int j = 0; j < order_GH_; ++j) { // mean_resp += GH_weights_[j] * CondMeanLikelihood(M_SQRT2 * sigma * GH_nodes_[j] + latent_mean); //} //pred_mean *= M_1_SQRTPI_; } template <typename T>//T can be double or float bool AreSame(const T a, const T b) const { return fabs(a - b) < a * EPSILON_; } // Used for likelihood_type_ == "bernoulli_probit" inline double normalCDF(double value) const { return 0.5 * std::erfc(-value * M_SQRT1_2); } inline double normalPDF(double value) const { return std::exp(-value * value / 2) / M_SQRT2PI_; //return std::exp(-value * value / 2) / std::sqrt(2 * M_PI); } private: /*! \brief Number of data points */ data_size_t num_data_; /*! \brief Number (dimension) of random effects */ data_size_t num_re_; /*! \brief Posterior mode used for Laplace approximation */ vec_t mode_; /*! \brief Posterior mode used for Laplace approximation: saving a previously found value allows for resetting the mode when having too large a step size. */ vec_t mode_previous_value_; /*! \brief Auxiliary variable a=ZSigmaZt^-1 mode_b used for Laplace approximation */ vec_t a_vec_; /*! \brief First derivatives of the log-likelihood */ vec_t first_deriv_ll_; /*! \brief Second derivatives of the negative log-likelihood (diagonal of matrix "W") */ vec_t second_deriv_neg_ll_; /*! \brief Diagonal of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' when there is only one random effect and ZtWZ is diagonal. Otherwise 'chol_fact_SigmaI_plus_ZtWZ_grouped_' is used for grouped REs) */ vec_t diag_SigmaI_plus_ZtWZ_; /*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' if there is more than one random effect). */ chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_grouped_; /*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'Vecchia') */ chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_vecchia_; //Note: chol_sp_mat_AMDOrder_t (AMD permutation) is faster than chol_sp_mat_t (no permutation) for the Vecchia approximation but for the grouped random effects the difference is small. // chol_sp_mat_COLAMDOrder_t is slower than no ordering or chol_sp_mat_AMDOrder_t for both grouped random effects and the Vecchia approximation /*! * \brief Cholesky factors of matrix B = I + Wsqrt * Z * Sigma * Zt * Wsqrt in Laplace approximation (for version 'Stable') * or of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt (for version 'OnlyOneGPCalculationsOnREScale') */ T_chol chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_; /*! 
\brief If true, the pattern for the Cholesky factor (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, chol_fact_SigmaI_plus_ZtWZ_grouped_, or chol_fact_SigmaI_plus_ZtWZ_vecchia_) has been analyzed */ bool chol_fact_pattern_analyzed_ = false; /*! \brief If true, the mode has been initialized to 0 */ bool mode_initialized_ = false; /*! \brief If true, the mode has been determined */ bool mode_has_been_calculated_ = false; /*! \brief If true, the normalizing constant of the likelihood has been calculated */ bool normalizing_constant_has_been_calculated_ = false; /*! \brief Normalizing constant for likelihoods (not all likelihoods have one) */ double log_normalizing_constant_; /*! \brief Type of likelihood */ string_t likelihood_type_ = "gaussian"; /*! \brief List of supported likelihoods */ const std::set<string_t> SUPPORTED_LIKELIHOODS_{ "gaussian", "bernoulli_probit", "bernoulli_logit", "poisson", "gamma" }; /*! \brief Tolerance level when comparing two doubles for equality */ double EPSILON_ = 1e-6; /*! \brief Maximal number of iterations done for finding posterior mode with Newton's method */ int MAXIT_MODE_NEWTON_ = 1000; /*! \brief Used for checking convergence in mode finding algorithm (terminate if relative change in Laplace approx. is below this value) */ double DELTA_REL_CONV_ = 1e-6; /*! \brief Additional parameters for likelihoods. For gamma, aux_pars_[0] = shape parameter */ std::vector<double> aux_pars_; string_t ParseLikelihoodAlias(const string_t& likelihood) { if (likelihood == string_t("binary") || likelihood == string_t("bernoulli_probit") || likelihood == string_t("binary_probit")) { return "bernoulli_probit"; } else if (likelihood == string_t("gaussian") || likelihood == string_t("regression")) { return "gaussian"; } return likelihood; } //Derived constants not defined in cmath //sqrt(2*pi) const double M_SQRT2PI_ = std::sqrt(2. * M_PI); ////1/sqrt(pi) (not used anymore, used for non-adaptive GH quadrature) //const double M_1_SQRTPI_ = M_2_SQRTPI / 2.; /*! \brief Order of the Gauss-Hermite quadrature */ int order_GH_ = 30; /*! 
\brief Nodes and weights for the Gauss-Hermite quadrature */ // Source: https://keisan.casio.com/exec/system/1281195844 const std::vector<double> GH_nodes_ = { -6.863345293529891581061, -6.138279220123934620395, -5.533147151567495725118, -4.988918968589943944486, -4.48305535709251834189, -4.003908603861228815228, -3.544443873155349886925, -3.099970529586441748689, -2.667132124535617200571, -2.243391467761504072473, -1.826741143603688038836, -1.415527800198188511941, -1.008338271046723461805, -0.6039210586255523077782, -0.2011285765488714855458, 0.2011285765488714855458, 0.6039210586255523077782, 1.008338271046723461805, 1.415527800198188511941, 1.826741143603688038836, 2.243391467761504072473, 2.667132124535617200571, 3.099970529586441748689, 3.544443873155349886925, 4.003908603861228815228, 4.48305535709251834189, 4.988918968589943944486, 5.533147151567495725118, 6.138279220123934620395, 6.863345293529891581061 }; const std::vector<double> GH_weights_ = { 2.908254700131226229411E-21, 2.8103336027509037088E-17, 2.87860708054870606219E-14, 8.106186297463044204E-12, 9.1785804243785282085E-10, 5.10852245077594627739E-8, 1.57909488732471028835E-6, 2.9387252289229876415E-5, 3.48310124318685523421E-4, 0.00273792247306765846299, 0.0147038297048266835153, 0.0551441768702342511681, 0.1467358475408900997517, 0.2801309308392126674135, 0.386394889541813862556, 0.3863948895418138625556, 0.2801309308392126674135, 0.1467358475408900997517, 0.0551441768702342511681, 0.01470382970482668351528, 0.002737922473067658462989, 3.48310124318685523421E-4, 2.938725228922987641501E-5, 1.579094887324710288346E-6, 5.1085224507759462774E-8, 9.1785804243785282085E-10, 8.10618629746304420399E-12, 2.87860708054870606219E-14, 2.81033360275090370876E-17, 2.9082547001312262294E-21 }; const std::vector<double> adaptive_GH_weights_ = { 0.83424747101276179534, 0.64909798155426670071, 0.56940269194964050397, 0.52252568933135454964, 0.491057995832882696506, 0.46837481256472881677, 0.45132103599118862129, 0.438177022652683703695, 0.4279180629327437485828, 0.4198950037368240886418, 0.413679363611138937184, 0.4089815750035316024972, 0.4056051233256844363121, 0.403419816924804022553, 0.402346066701902927115, 0.4023460667019029271154, 0.4034198169248040225528, 0.4056051233256844363121, 0.4089815750035316024972, 0.413679363611138937184, 0.4198950037368240886418, 0.427918062932743748583, 0.4381770226526837037, 0.45132103599118862129, 0.46837481256472881677, 0.4910579958328826965056, 0.52252568933135454964, 0.56940269194964050397, 0.64909798155426670071, 0.83424747101276179534 }; /*! \brief Get number of non-zero entries in matrix */ template <class T_mat1, typename std::enable_if< std::is_same<sp_mat_t, T_mat1>::value>::type * = nullptr > int GetNumberNonZeros(T_mat1 M) { return((int)M.nonZeros()); }; template <class T_mat1, typename std::enable_if< std::is_same<den_mat_t, T_mat1>::value>::type * = nullptr > int GetNumberNonZeros(T_mat1 M) { return((int)M.cols() * M.rows()); }; };//end class Likelihood } // namespace GPBoost #endif // GPB_LIKELIHOODS_
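// ---------------------------------------------------------------------------
// Illustrative sketch (not part of GPBoost): the commented-out "non-adaptive
// GH quadrature" block in RespMeanAdaptiveGHQuadrature above approximates
// E[g(X)] for X ~ N(mu, sigma^2) as (1/sqrt(pi)) * sum_j w_j * g(sqrt(2)*sigma*x_j + mu),
// where x_j / w_j are physicists' Gauss-Hermite nodes / weights. The minimal,
// self-contained 3-point version below shows the change of variables; 'g'
// stands in for CondMeanLikelihood, and 'exp_of' is a hypothetical test integrand.
#include <cmath>

static double exp_of(double x) { return std::exp(x); }

static double gh_mean_3pt(double (*g)(double), double mu, double sigma) {
	// Roots of H_3(x) = 8x^3 - 12x and their Gauss-Hermite weights
	const double nodes[3] = { -std::sqrt(1.5), 0., std::sqrt(1.5) };
	const double weights[3] = { std::sqrt(M_PI) / 6., 2. * std::sqrt(M_PI) / 3., std::sqrt(M_PI) / 6. };
	double mean = 0.;
	for (int j = 0; j < 3; ++j) {
		mean += weights[j] * g(M_SQRT2 * sigma * nodes[j] + mu);
	}
	return mean / std::sqrt(M_PI);// change-of-variables factor 1/sqrt(pi)
}
// Example: for X ~ N(0,1), E[exp(X)] = exp(0.5) ~= 1.6487; gh_mean_3pt(exp_of, 0., 1.)
// returns ~1.6382 (a 3-point rule is only exact up to polynomial degree 5).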
Merge_sort.c
#include<stdio.h> #include<stdlib.h> #include<string.h> #include<omp.h> #define max 1000 void generate_list(int *x,int n) { for(int i=0;i<n;i++) x[i]=rand()%n; } void print_list(int *x,int n) { for(int i=0;i<n;i++) printf("%d\t",x[i]); } void merge(int *x,int n,int *temp) { int i=0; int j=n/2; int ti=0; while(i<n/2 && j<n){ if(x[i]<x[j]){ temp[ti]=x[i]; ti++; i++; } else { temp[ti]=x[j]; ti++; j++; } } while(i<n/2){ temp[ti]=x[i]; ti++; i++; } while(j<n){ temp[ti]=x[j]; ti++; j++; } memcpy(x,temp,n*sizeof(int)); } void merge_sort(int *x,int n,int *temp) { if(n<2) return; #pragma omp task firstprivate(x,n,temp) merge_sort(x,n/2,temp); #pragma omp task firstprivate(x,n,temp) merge_sort(x+(n/2),n-(n/2),temp+(n/2)); /* offset temp so concurrent tasks write disjoint scratch regions (passing the same temp to both tasks is a data race) */ #pragma omp taskwait merge(x,n,temp); } int main(void) { int n=100; double start,stop; int data[max],tmp[max]; generate_list(data,n); printf("\nThe list before Sorting is:"); print_list(data,n); printf("\n"); start=omp_get_wtime(); #pragma omp parallel { #pragma omp single { merge_sort(data,n,tmp); } } stop=omp_get_wtime(); printf("\n\n\n"); print_list(data,n); printf("Time required is %g\n",stop-start); return 0; }
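/* A possible refinement (sketch, not part of the original program): spawning a
 * task for every sub-array down to n < 2 drowns the runtime in tiny tasks. A
 * common remedy is a cutoff below which the recursion runs serially; the value
 * 1000 used here is an arbitrary illustrative choice, not a tuned one. */
void merge_sort_serial(int *x, int n, int *temp)
{
	if (n < 2)
		return;
	merge_sort_serial(x, n/2, temp);
	merge_sort_serial(x+(n/2), n-(n/2), temp+(n/2));
	merge(x, n, temp);
}

void merge_sort_cutoff(int *x, int n, int *temp)
{
	if (n < 2)
		return;
	if (n < 1000) {
		merge_sort_serial(x, n, temp);	/* no task overhead below the cutoff */
		return;
	}
	#pragma omp task firstprivate(x,n,temp)
	merge_sort_cutoff(x, n/2, temp);
	#pragma omp task firstprivate(x,n,temp)
	merge_sort_cutoff(x+(n/2), n-(n/2), temp+(n/2));
	#pragma omp taskwait
	merge(x, n, temp);
}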
graph_generator.c
/* Copyright (C) 2009-2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #include <stdlib.h> #include <stdint.h> #include <assert.h> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #include "user_settings.h" #include "splittable_mrg.h" #include "graph_generator.h" /* Initiator settings: for faster random number generation, the initiator * probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR / * INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR / * INITIATOR_DENOMINATOR, d = 1 - a - b - c. */ #define INITIATOR_A_NUMERATOR 5700 #define INITIATOR_BC_NUMERATOR 1900 #define INITIATOR_DENOMINATOR 10000 /* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL / * INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise * into the graph parameters. The approach used is from "A Hitchhiker's Guide * to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali * Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that * the adjustment here is chosen based on the current level being processed * rather than being chosen randomly. */ #define SPK_NOISE_LEVEL 0 /* #define SPK_NOISE_LEVEL 1000 -- in INITIATOR_DENOMINATOR units */ static int generate_4way_bernoulli(mrg_state* st, int level, int nlevels) { #if SPK_NOISE_LEVEL == 0 /* Avoid warnings */ (void)level; (void)nlevels; #endif /* Generate a pseudorandom number in the range [0, INITIATOR_DENOMINATOR) * without modulo bias. */ static const uint32_t limit = (UINT32_C(0x7FFFFFFF) % INITIATOR_DENOMINATOR); uint32_t val = mrg_get_uint_orig(st); if (/* Unlikely */ val < limit) { do { val = mrg_get_uint_orig(st); } while (val < limit); } #if SPK_NOISE_LEVEL == 0 int spk_noise_factor = 0; #else int spk_noise_factor = 2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL; #endif unsigned int adjusted_bc_numerator = (unsigned int)(INITIATOR_BC_NUMERATOR + spk_noise_factor); val %= INITIATOR_DENOMINATOR; if (val < adjusted_bc_numerator) return 1; val = (uint32_t)(val - adjusted_bc_numerator); if (val < adjusted_bc_numerator) return 2; val = (uint32_t)(val - adjusted_bc_numerator); #if SPK_NOISE_LEVEL == 0 if (val < INITIATOR_A_NUMERATOR) return 0; #else if (val < INITIATOR_A_NUMERATOR * (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) / (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator)) return 0; #endif #if SPK_NOISE_LEVEL == 0 /* Avoid warnings */ (void)level; (void)nlevels; #endif return 3; } /* Reverse bits in a number; this should be optimized for performance * (including using bit- or byte-reverse intrinsics if your platform has them). 
* */ static inline uint64_t bitreverse(uint64_t x) { #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) #define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */ #endif #ifdef FAST_64BIT_ARITHMETIC /* 64-bit code */ #ifdef USE_GCC_BYTESWAP x = __builtin_bswap64(x); #else x = (x >> 32) | (x << 32); x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16); x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8); #endif x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4); x = ((x >> 2) & UINT64_C(0x3333333333333333)) | ((x & UINT64_C(0x3333333333333333)) << 2); x = ((x >> 1) & UINT64_C(0x5555555555555555)) | ((x & UINT64_C(0x5555555555555555)) << 1); return x; #else /* 32-bit code */ uint32_t h = (uint32_t)(x >> 32); uint32_t l = (uint32_t)(x & UINT32_MAX); #ifdef USE_GCC_BYTESWAP h = __builtin_bswap32(h); l = __builtin_bswap32(l); #else h = (h >> 16) | (h << 16); l = (l >> 16) | (l << 16); h = ((h >> 8) & UINT32_C(0x00FF00FF)) | ((h & UINT32_C(0x00FF00FF)) << 8); l = ((l >> 8) & UINT32_C(0x00FF00FF)) | ((l & UINT32_C(0x00FF00FF)) << 8); #endif h = ((h >> 4) & UINT32_C(0x0F0F0F0F)) | ((h & UINT32_C(0x0F0F0F0F)) << 4); l = ((l >> 4) & UINT32_C(0x0F0F0F0F)) | ((l & UINT32_C(0x0F0F0F0F)) << 4); h = ((h >> 2) & UINT32_C(0x33333333)) | ((h & UINT32_C(0x33333333)) << 2); l = ((l >> 2) & UINT32_C(0x33333333)) | ((l & UINT32_C(0x33333333)) << 2); h = ((h >> 1) & UINT32_C(0x55555555)) | ((h & UINT32_C(0x55555555)) << 1); l = ((l >> 1) & UINT32_C(0x55555555)) | ((l & UINT32_C(0x55555555)) << 1); return ((uint64_t)l << 32) | h; /* Swap halves */ #endif } /* Apply a permutation to scramble vertex numbers; a randomly generated * permutation is not used because applying it at scale is too expensive. */ static inline int64_t scramble(int64_t v0, int lgN, uint64_t val0, uint64_t val1) { uint64_t v = (uint64_t)v0; v += val0 + val1; v *= (val0 | UINT64_C(0x4519840211493211)); v = (bitreverse(v) >> (64 - lgN)); assert ((v >> lgN) == 0); v *= (val1 | UINT64_C(0x3050852102C843A5)); v = (bitreverse(v) >> (64 - lgN)); assert ((v >> lgN) == 0); return (int64_t)v; } /* Make a single graph edge using a pre-set MRG state. */ static void make_one_edge(int64_t nverts, int level, int lgN, mrg_state* st, packed_edge* result, uint64_t val0, uint64_t val1) { int64_t base_src = 0, base_tgt = 0; while (nverts > 1) { int square = generate_4way_bernoulli(st, level, lgN); int src_offset = square / 2; int tgt_offset = square % 2; assert (base_src <= base_tgt); if (base_src == base_tgt) { /* Clip-and-flip for undirected graph */ if (src_offset > tgt_offset) { int temp = src_offset; src_offset = tgt_offset; tgt_offset = temp; } } nverts /= 2; ++level; base_src += nverts * src_offset; base_tgt += nverts * tgt_offset; } write_edge(result, scramble(base_src, lgN, val0, val1), scramble(base_tgt, lgN, val0, val1)); } /* Generate a range of edges (from start_edge to end_edge of the total graph), * writing into elements [0, end_edge - start_edge) of the edges array. This * code is parallel on OpenMP and XMT; it must be used with * separately-implemented SPMD parallelism for MPI. 
*/ void generate_kronecker_range( const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1), not all zero */, int logN /* In base 2 */, int64_t start_edge, int64_t end_edge, packed_edge* edges #ifdef SSSP , float* weights #endif ) { mrg_state state; int64_t nverts = (int64_t)1 << logN; int64_t ei; mrg_seed(&state, seed); uint64_t val0, val1; /* Values for scrambling */ { mrg_state new_state = state; mrg_skip(&new_state, 50, 7, 0); val0 = mrg_get_uint_orig(&new_state); val0 *= UINT64_C(0xFFFFFFFF); val0 += mrg_get_uint_orig(&new_state); val1 = mrg_get_uint_orig(&new_state); val1 *= UINT64_C(0xFFFFFFFF); val1 += mrg_get_uint_orig(&new_state); } #ifdef _OPENMP #pragma omp parallel for #endif #ifdef __MTA__ #pragma mta assert parallel #pragma mta block schedule #endif for (ei = start_edge; ei < end_edge; ++ei) { mrg_state new_state = state; mrg_skip(&new_state, 0, (uint64_t)ei, 0); make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0, val1); #ifdef SSSP weights[ei-start_edge]=mrg_get_float_orig(&new_state); #endif } }
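/* Hypothetical driver (sketch, not part of the Graph500 sources): shows how
 * generate_kronecker_range() above would be called in a non-SSSP build. The
 * seed values and the edge factor of 16 are arbitrary illustrative choices.
 * Guarded with #if 0 so it does not clash with an application's own main(). */
#if 0
int main(void) {
  const uint_fast32_t seed[5] = {1, 2, 3, 4, 5}; /* all in [0, 2^31 - 1), not all zero */
  int logN = 16;                                 /* 2^16 vertices */
  int64_t nedges = ((int64_t)1 << logN) * 16;    /* edge factor 16 */
  packed_edge* edges = (packed_edge*)malloc(nedges * sizeof(packed_edge));
  if (!edges) return 1;
  /* generate the full edge range [0, nedges) into one local buffer */
  generate_kronecker_range(seed, logN, 0, nedges, edges);
  free(edges);
  return 0;
}
#endif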
MPI.h
/** * @file * This file is part of SeisSol. * * @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger) * * @section LICENSE * Copyright (c) 2015-2016, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @section DESCRIPTION * MPI Wrapper */ #ifndef MPI_H #define MPI_H #ifndef USE_MPI #include "MPIDummy.h" #else // USE_MPI #include <mpi.h> #include "utils/logger.h" #include "MPIBasic.h" #ifdef ACL_DEVICE #include <cstdlib> #include <string> #include <sstream> #include <vector> #include <device.h> #endif // ACL_DEVICE #endif // USE_MPI namespace seissol { #ifndef USE_MPI typedef MPIDummy MPI; #else // USE_MPI /** * MPI handling. * * Make sure only one instance of this class exists! */ class MPI : public MPIBasic { private: MPI_Comm m_comm; #ifdef ACL_DEVICE int m_localRank{}; int m_localSize{}; int m_deviceId{}; #endif // ACL_DEVICE private: MPI() : m_comm(MPI_COMM_NULL) { } public: ~MPI() { } #ifdef ACL_DEVICE private: /** * @brief Reads and returns environment variables * * Some MPI vendors provide env. variables which allow one to find out the local rank and size * before calling MPI_Init(...). However, they tend to name these variables differently, i.e. uniquely * for their implementation. Thus, the function takes some potential candidates, loops through them, and tries * to retrieve a value. * * @param candidates a vector of strings with names of possible env. variables * @throws std::string in case a value cannot be retrieved from the candidate list * @throws std::invalid_argument in case an env. variable doesn't contain an integer, e.g. char, string, etc. * @throws std::out_of_range in case an env. variable contains a value bigger than the size of an integer * */ static int readValueFromEnvVariables(std::vector<std::string> &candidates) { char* valueStr = nullptr; for (auto envVar: candidates) { valueStr = std::getenv(envVar.c_str()); if (valueStr) break; } if (!valueStr) { std::stringstream stream; stream << "could not detect any env. 
variable from a list of candidates, namely: "; for (const auto& item: candidates) { stream << item << ", "; } stream << ". Please consider using another MPI implementation with offloading support."; logError() << stream.str(); } return std::stoi(std::string(valueStr)); } public: /** * @brief Inits Device(s). * * Some MPI implementations create a so-called context between GPUs and OS processes inside of MPI_Init(...). * It results in allocating some memory buffers in memory attached to the nearest NUMA domain * of a core where a process is running. If somebody wants to bind processes in a different way, * e.g. move a process closer to a GPU, it must be done before calling MPI_Init(...) using env. variables * or the hwloc library. * * Currently, the function does a simple binding, i.e. a local rank controls the corresponding device. * For instance, localRank=2 is going to use deviceId=2. The user is responsible for the correct binding. * She/he must refer to the documentation of their job scheduler or MPI implementation to achieve correct * GPU/CPU affinity. Note, one can improve the current binding strategy using hwloc. * See Professional CUDA Programming, subsection "Affinity on MPI-CUDA Programs", as a reference. * * The function supports the following MPI implementations: OpenMPI, MVAPICH2, IntelMPI * */ void bindRankToDevice() { try { std::vector<std::string> rankEnvVars{{"OMPI_COMM_WORLD_LOCAL_RANK"}, {"MV2_COMM_WORLD_LOCAL_RANK"}, {"SLURM_LOCALID"}, {"PMI_RANK"} }; std::vector<std::string> sizeEnvVars{{"OMPI_COMM_WORLD_LOCAL_SIZE"}, {"MV2_COMM_WORLD_LOCAL_SIZE"}, {"SLURM_NTASKS_PER_NODE"}, {"PMI_SIZE"}}; m_localRank = readValueFromEnvVariables(rankEnvVars); m_localSize = readValueFromEnvVariables(sizeEnvVars); } catch (const std::invalid_argument &err) { logError() << err.what() << ". File: " << __FILE__ << ", line: " << __LINE__; } catch (const std::out_of_range& err) { logError() << err.what() << ". File: " << __FILE__ << ", line: " << __LINE__; } device::DeviceInstance& device = device::DeviceInstance::getInstance(); int numDevices = device.api->getNumDevices(); if (m_localSize > numDevices) { logError() << "Local MPI size (in a compute node) is greater than the number of available devices." << "Over-subscription of devices is currently not supported in SeisSol." << "Adjust the number of local MPI ranks and local devices.\n" << "File: " << __FILE__ << ", line: " << __LINE__; } m_deviceId = m_localRank; #ifdef _OPENMP #pragma omp parallel { #pragma omp critical { device.api->setDevice(m_deviceId); } } #else device.api->setDevice(m_deviceId); #endif } int getDeviceID() { return m_deviceId; } #endif // ACL_DEVICE /** * Initialize MPI */ void init(int &argc, char** &argv) { int required = (m_threadsafe ? 
MPI_THREAD_MULTIPLE : MPI_THREAD_SINGLE); int provided; MPI_Init_thread(&argc, &argv, required, &provided); setComm(MPI_COMM_WORLD); // Test this after setComm() to get the correct m_rank if (provided < required) logWarning(m_rank) << utils::nospace << "Provided MPI thread support (" << provided << ") is smaller than required thread support (" << required << ")."; } void setComm(MPI_Comm comm) { m_comm = comm; MPI_Comm_rank(comm, &m_rank); MPI_Comm_size(comm, &m_size); } /** * @return The main communicator for the application */ MPI_Comm comm() const { return m_comm; } void barrier(MPI_Comm comm) const { MPI_Barrier(comm); } /** * Finalize MPI */ void finalize() { fault.finalize(); MPI_Finalize(); } public: /** The only instance of the class */ static MPI mpi; }; #endif // USE_MPI } #endif // MPI_H
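// Hypothetical usage sketch (not part of SeisSol): the intended lifecycle of
// the wrapper when USE_MPI is defined. m_rank/m_size/m_threadsafe come from
// MPIBasic, which is not shown here.
//
//   #include "MPI.h"
//
//   int main(int argc, char** argv) {
//     seissol::MPI::mpi.init(argc, argv);        // MPI_Init_thread + setComm(MPI_COMM_WORLD)
//     MPI_Comm comm = seissol::MPI::mpi.comm();  // communicator set by init()
//     seissol::MPI::mpi.barrier(comm);           // collective synchronization
//     // ... application work ...
//     seissol::MPI::mpi.finalize();              // MPI_Finalize
//     return 0;
//   }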
core.c
#include "ghost/config.h" #include "ghost/core.h" #include "ghost/log.h" #include "ghost/util.h" #include "ghost/math.h" #include "ghost/machine.h" #include "ghost/locality.h" #include "ghost/taskq.h" #include "ghost/thpool.h" #include "ghost/timing.h" #include "ghost/pumap.h" #include "ghost/omp.h" #include "ghost/rand.h" //#include "ghost/tsmm.h" //#include "ghost/tsmm_inplace.h" //#include "ghost/tsmttsm.h" #include "ghost/instr.h" #include "ghost/autogen.h" #include <hwloc.h> #if HWLOC_API_VERSION >= 0x00010700 #include <hwloc/intel-mic.h> #else #warning "The HWLOC version is too old. Cannot detect Intel Xeon Phis!" #endif #ifdef GHOST_INSTR_LIKWID #include <likwid.h> #endif #ifdef GHOST_HAVE_CUDA #include <hwloc/cudart.h> #include <cuda_runtime.h> #endif #ifdef GHOST_HAVE_ZOLTAN #include <zoltan.h> #endif #include <strings.h> static ghost_type mytype = GHOST_TYPE_INVALID; static int MPIwasInitialized = 0; static int initialized = 0; /** * @brief A communicator containing only the processes with GHOST_HAVE_CUDA enabled. * * This is necessary, e.g., for gathering CUDA information in heterogeneous runs containing Xeon Phis. */ static ghost_mpi_comm ghost_cuda_comm = MPI_COMM_NULL; char * ghost_type_string(ghost_type t) { char *ret; GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); switch (t) { case GHOST_TYPE_CUDA: ret = "CUDA"; break; case GHOST_TYPE_WORK: ret = "WORK"; break; case GHOST_TYPE_INVALID: ret = "INVALID"; break; default: ret = "Unknown"; } GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return ret; } ghost_error ghost_type_set(ghost_type t) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); mytype = t; GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; } ghost_error ghost_type_get(ghost_type *t) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); if (!t) { GHOST_ERROR_LOG("NULL pointer"); return GHOST_ERR_INVALID_ARG; } *t = mytype; GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; } int ghost_initialized() { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return initialized; } #ifdef GHOST_INSTR_LIKWID static void *likwidThreadInitTask(void *arg) { UNUSED(arg); #pragma omp parallel { likwid_markerThreadInit(); ghost_instr_prefix_set(""); ghost_instr_suffix_set(""); } return NULL; } #endif ghost_error ghost_init(int argc, char **argv) { if (initialized) { return GHOST_SUCCESS; } else { initialized=1; } ghost_instr_create(); ghost_instr_prefix_set(""); ghost_instr_suffix_set(""); #ifdef GHOST_HAVE_MPI int req, prov; #ifdef GHOST_HAVE_OPENMP req = MPI_THREAD_MULTIPLE; #else req = MPI_THREAD_SINGLE; #endif MPI_CALL_RETURN(MPI_Initialized(&MPIwasInitialized)); if (!MPIwasInitialized) { MPI_CALL_RETURN(MPI_Init_thread(&argc, &argv, req, &prov)); if (req != prov) { GHOST_WARNING_LOG("Required MPI threading level (%d) is not " "provided (%d)!",req,prov); } } else { GHOST_INFO_LOG("MPI was already initialized, not doing it!"); } #ifdef GHOST_HAVE_ZOLTAN float ver; ZOLTAN_CALL_RETURN(Zoltan_Initialize(argc, argv, &ver)); #endif ghost_nodecomm_setup(MPI_COMM_WORLD); ghost_mpi_datatypes_create(); ghost_mpi_operations_create(); #else // ifdef GHOST_HAVE_MPI UNUSED(MPIwasInitialized); UNUSED(argc); UNUSED(argv); #endif // ifdef GHOST_HAVE_MPI GHOST_FUNC_ENTER(GHOST_FUNCTYPE_SETUP); GHOST_CALL_RETURN(ghost_timing_start()); hwloc_topology_t topology; ghost_topology_create(); ghost_topology_get(&topology); hwloc_cpuset_t cpuset = hwloc_bitmap_alloc(); hwloc_get_cpubind(topology,cpuset,HWLOC_CPUBIND_PROCESS); if (hwloc_bitmap_weight(cpuset) < 
hwloc_get_nbobjs_by_type(topology,HWLOC_OBJ_PU)) { char *cpusetstr; ghost_bitmap_list_asprintf(&cpusetstr,cpuset); GHOST_WARNING_LOG("GHOST is running in a restricted CPU set: %s. This is probably not what you want because GHOST cares for pinning itself. If you want to restrict the resources exposed to GHOST use the GHOST_CPUSET environment variable.",cpusetstr); free(cpusetstr); } hwloc_bitmap_free(cpuset); cpuset = NULL; hwloc_cpuset_t mycpuset = hwloc_bitmap_alloc(); // auto-set rank types ghost_mpi_comm nodeComm; int nnoderanks; int noderank; GHOST_CALL_RETURN(ghost_nodecomm_get(&nodeComm)); GHOST_CALL_RETURN(ghost_nrank(&nnoderanks, nodeComm)); GHOST_CALL_RETURN(ghost_rank( &noderank, nodeComm)); hwloc_cpuset_t availcpuset = hwloc_bitmap_alloc(); char *envset = getenv("GHOST_CPUSET"); if (envset) { hwloc_bitmap_list_sscanf(availcpuset,envset); } else { hwloc_bitmap_copy(availcpuset,hwloc_topology_get_allowed_cpuset(topology)); } GHOST_IF_DEBUG(2) { char *cpusetStr; hwloc_bitmap_list_asprintf(&cpusetStr,availcpuset); GHOST_DEBUG_LOG(2,"Available CPU set: %s",cpusetStr); free(cpusetStr); } int nxeonphis_total; int ncudadevs = 0; int nxeonphis = -1; int nnumanodes; int npus; int ncores; int nsockets; nsockets = hwloc_get_nbobjs_inside_cpuset_by_type(topology,availcpuset,HWLOC_OBJ_SOCKET); nnumanodes = hwloc_get_nbobjs_inside_cpuset_by_type(topology,availcpuset,HWLOC_OBJ_NODE); ncores = hwloc_get_nbobjs_inside_cpuset_by_type(topology,availcpuset,HWLOC_OBJ_CORE); npus = hwloc_get_nbobjs_inside_cpuset_by_type(topology,availcpuset,HWLOC_OBJ_PU); GHOST_INFO_LOG("# sockets: %d, # NUMA nodes: %d, # cores: %d, # PUs: %d",nsockets,nnumanodes,ncores,npus); #ifdef GHOST_HAVE_CUDA GHOST_CALL_RETURN(ghost_cu_ndevice(&ncudadevs)); #endif #if HWLOC_API_VERSION >= 0x00010700 hwloc_obj_t phi = NULL; do { nxeonphis++; phi = hwloc_intel_mic_get_device_osdev_by_index(topology,nxeonphis); } while (phi); if (noderank == 0) { nxeonphis_total = nxeonphis; } else { nxeonphis_total = 0; } #ifdef GHOST_HAVE_MPI MPI_CALL_RETURN(MPI_Allreduce(MPI_IN_PLACE,&nxeonphis_total,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD)); #endif #else GHOST_WARNING_LOG("Possibly wrong information about the number of Xeon Phis due to outdated HWLOC!"); nxeonphis_total = 0; #endif int nactivephis = 0; #ifdef GHOST_BUILD_MIC nactivephis = 1; #endif #ifdef GHOST_HAVE_MPI MPI_CALL_RETURN(MPI_Allreduce(MPI_IN_PLACE,&nactivephis,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD)); #endif if (nactivephis < nxeonphis_total) { GHOST_PERFWARNING_LOG("There %s %d Xeon Phi%s in the set of active nodes but only %d %s used!", nxeonphis_total>1?"are":"is",nxeonphis_total,nxeonphis_total>1?"s":"",nactivephis,nactivephis==1?"is":"are"); } if (nnoderanks != nnumanodes+ncudadevs) { GHOST_PERFWARNING_LOG("The number of MPI processes (%d) on this node is not " "optimal! 
Suggested number: %d (%d NUMA domain%s + %d CUDA device%s)", nnoderanks,nnumanodes+ncudadevs,nnumanodes,nnumanodes==1?"":"s",ncudadevs,ncudadevs==1?"":"s"); } // get GHOST type set by the user ghost_type settype; GHOST_CALL_RETURN(ghost_type_get(&settype)); if (settype == GHOST_TYPE_INVALID) { char *envtype = getenv("GHOST_TYPE"); if (envtype) { if (!strncasecmp(envtype,"CUDA",4) || !strncasecmp(envtype,"GPU",3)) { mytype = GHOST_TYPE_CUDA; } else if (!strncasecmp(envtype,"WORK",4) || !strncasecmp(envtype,"CPU",3)) { mytype = GHOST_TYPE_WORK; } } } // type has been set by neither env nor API if (settype == GHOST_TYPE_INVALID && mytype == GHOST_TYPE_INVALID) { if (noderank == 0) { mytype = GHOST_TYPE_WORK; } else if (noderank <= ncudadevs) { mytype = GHOST_TYPE_CUDA; } else { mytype = GHOST_TYPE_WORK; } if (ncudadevs && nnoderanks > 1) { GHOST_INFO_LOG("Setting GHOST type to %s due to heuristics.",ghost_type_string(mytype)); } } #ifndef GHOST_HAVE_CUDA if (mytype == GHOST_TYPE_CUDA) { GHOST_WARNING_LOG("This rank is supposed to be a CUDA management rank but CUDA is not available. Re-setting GHOST type"); mytype = GHOST_TYPE_WORK; } #endif GHOST_CALL_RETURN(ghost_type_set(mytype)); int i; int localTypes[nnoderanks]; for (i=0; i<nnoderanks; i++) { localTypes[i] = GHOST_TYPE_INVALID; } localTypes[noderank] = mytype; int ncudaranks_on_node = mytype==GHOST_TYPE_CUDA; #ifdef GHOST_HAVE_MPI ghost_mpi_comm ghost_node_comm; GHOST_CALL_RETURN(ghost_nodecomm_get(&ghost_node_comm)); MPI_CALL_RETURN(MPI_Allreduce(MPI_IN_PLACE,&ncudaranks_on_node,1,MPI_INT,MPI_SUM,ghost_node_comm)); #ifdef GHOST_HAVE_CUDA if (ncudadevs < ncudaranks_on_node) { GHOST_WARNING_LOG("There are %d CUDA management ranks on this node but only %d CUDA devices.",ncudaranks_on_node,ncudadevs); } #endif MPI_CALL_RETURN(MPI_Allreduce(MPI_IN_PLACE,&localTypes,nnoderanks,MPI_INT,MPI_MAX,ghost_node_comm)); #endif ghost_hwconfig hwconfig; ghost_hwconfig_get(&hwconfig); int hasCuda = 0; hwloc_cpuset_t cudaOccupiedCpuset = hwloc_bitmap_alloc(); #ifdef GHOST_HAVE_CUDA hasCuda = 1; int cudaDevice = 0; if (hwconfig.cudevice != GHOST_HWCONFIG_INVALID) { GHOST_CALL_RETURN(ghost_cu_init(hwconfig.cudevice)); } else { // automatically assign a CUDA device for (i=0; i<nnoderanks; i++) { if (localTypes[i] == GHOST_TYPE_CUDA) { if (i == noderank) { hwconfig.cudevice = cudaDevice%ncudadevs; GHOST_CALL_RETURN(ghost_cu_init(hwconfig.cudevice)); } cudaDevice++; } } } GHOST_CALL_RETURN(ghost_hwconfig_set(hwconfig)); // CUDA ranks have a physical core cudaDevice = 0; hwloc_obj_t cudaCore = NULL; for (i=0; i<nnoderanks; i++) { if (localTypes[i] == GHOST_TYPE_CUDA) { hwloc_cpuset_t fullCuCpuset = hwloc_bitmap_alloc(); hwloc_cpuset_t reducedCuCpuset; HWLOC_CALL_RETURN(hwloc_cudart_get_device_cpuset(topology,cudaDevice,fullCuCpuset)); // restrict CUDA cpuset to CPUs which are still in global cpuset hwloc_bitmap_and(fullCuCpuset,fullCuCpuset,availcpuset); if (hwloc_bitmap_iszero(fullCuCpuset)) { GHOST_PERFWARNING_LOG("Placing CUDA process on far socket!"); hwloc_bitmap_copy(fullCuCpuset,availcpuset); } if (nnoderanks > 1) { // select a single core for this CUDA rank cudaCore = hwloc_get_next_obj_inside_cpuset_by_type(topology,fullCuCpuset,HWLOC_OBJ_CORE,cudaCore); reducedCuCpuset = cudaCore->cpuset; } else { reducedCuCpuset = fullCuCpuset; } if (noderank == i) { hwloc_bitmap_copy(mycpuset,reducedCuCpuset); } hwloc_bitmap_or(cudaOccupiedCpuset,cudaOccupiedCpuset,reducedCuCpuset); hwloc_bitmap_free(fullCuCpuset); cudaDevice++; } } #endif int ncpuranks_on_node = 
nnoderanks-ncudaranks_on_node; if (ncpuranks_on_node > 1) { // indicate whether the CPU ranks cover a full hwloc obj bool ranks_cover_obj = true; hwloc_obj_type_t distr_type; if (nsockets == ncpuranks_on_node) { GHOST_INFO_LOG("One process per socket"); distr_type = HWLOC_OBJ_SOCKET; } else if (nnumanodes == ncpuranks_on_node) { GHOST_INFO_LOG("One process per NUMA node"); distr_type = HWLOC_OBJ_NODE; } else if (ncores == ncpuranks_on_node) { GHOST_INFO_LOG("One process per core"); distr_type = HWLOC_OBJ_CORE; } else if (npus == ncpuranks_on_node) { GHOST_INFO_LOG("One process per PU"); distr_type = HWLOC_OBJ_PU; } else if (npus < ncpuranks_on_node) { distr_type = HWLOC_OBJ_PU; GHOST_PERFWARNING_LOG("Oversubscription! Some processes will share PUs!"); ranks_cover_obj = false; } else { GHOST_PERFWARNING_LOG("Naively sharing %d PUs among %d ranks",npus,ncpuranks_on_node); ranks_cover_obj = false; } hwloc_obj_t coverobj = NULL; int cpurank = 0; hwloc_bitmap_t rank_cpuset = hwloc_bitmap_alloc(); // we need a copy because we delete PUs from availcpuset as we go through the processes hwloc_cpuset_t fullavailcpuset = hwloc_bitmap_dup(availcpuset); for (i=0; i<nnoderanks; i++) { hwloc_bitmap_zero(rank_cpuset); if (localTypes[i] == GHOST_TYPE_WORK) { if (ranks_cover_obj) { // the obj covered by this rank coverobj = hwloc_get_obj_inside_cpuset_by_type(topology, fullavailcpuset, distr_type,cpurank); hwloc_bitmap_copy(rank_cpuset,coverobj->cpuset); } else { hwloc_obj_type_t dist_obj; int obj_per_rank; int nobj; if (ncpuranks_on_node <= ncores) { GHOST_PERFWARNING_LOG("Distributing cores among processes"); dist_obj = HWLOC_OBJ_CORE; obj_per_rank = ncores/ncpuranks_on_node; nobj = ncores; } else { dist_obj = HWLOC_OBJ_PU; nobj = npus; if (ncpuranks_on_node <= npus) { GHOST_PERFWARNING_LOG("Distributing PUs among processes"); obj_per_rank = npus/ncpuranks_on_node; } else { GHOST_PERFWARNING_LOG("More processes than PUs!"); obj_per_rank = 1; } } if (i == noderank) { int r,oi; // assign cores r = MIN(cpurank*obj_per_rank,(nobj-1)); for (oi=0; oi < obj_per_rank; oi++) { hwloc_bitmap_or(rank_cpuset,rank_cpuset,hwloc_get_obj_inside_cpuset_by_type(topology,fullavailcpuset,dist_obj,r)->cpuset); if (r<(nobj-1)) { r++; } } // remainder if (cpurank == ncpuranks_on_node-1) { for (; r<nobj; r++) { hwloc_bitmap_or(rank_cpuset,rank_cpuset,hwloc_get_obj_inside_cpuset_by_type(topology,fullavailcpuset,dist_obj,r)->cpuset); } } } cpurank++; } // set mycpuset if (i == noderank) { hwloc_bitmap_copy(mycpuset,rank_cpuset); } // delete my PUs from available CPU set hwloc_bitmap_andnot(availcpuset,availcpuset,rank_cpuset); if (ranks_cover_obj) { // only go to next obj if no oversubscription if (cpurank < hwloc_get_nbobjs_inside_cpuset_by_type(topology, fullavailcpuset, distr_type)-1) { cpurank++; } } } } hwloc_bitmap_free(rank_cpuset); hwloc_bitmap_free(fullavailcpuset); } else { GHOST_INFO_LOG("One process per node"); if (mytype == GHOST_TYPE_WORK) { hwloc_bitmap_copy(mycpuset,availcpuset); } } if (mytype == GHOST_TYPE_WORK) { // exclude CUDA cores from CPU set hwloc_bitmap_andnot(mycpuset,mycpuset,cudaOccupiedCpuset); } if (hwloc_bitmap_iszero(mycpuset)) { GHOST_WARNING_LOG("Something went wrong and I ended up with an empty CPU set! 
I will use all CPUs instead which will probably lead to resource conflicts!"); hwloc_bitmap_copy(mycpuset,availcpuset); } if (hwconfig.ncore == GHOST_HWCONFIG_INVALID) { ghost_machine_ncore(&hwconfig.ncore,GHOST_NUMANODE_ANY); } if (hwconfig.nsmt == GHOST_HWCONFIG_INVALID) { ghost_machine_nsmt(&hwconfig.nsmt); } #ifdef GHOST_HAVE_MPI int rank; ghost_mpi_comm tmpcomm; GHOST_CALL_RETURN(ghost_rank(&rank,MPI_COMM_WORLD)); MPI_CALL_RETURN(MPI_Comm_dup(MPI_COMM_WORLD,&tmpcomm)); MPI_CALL_RETURN(MPI_Comm_split(tmpcomm,hasCuda,rank,&ghost_cuda_comm)); MPI_CALL_RETURN(MPI_Comm_free(&tmpcomm)); #else UNUSED(hasCuda); #endif // delete excess PUs unsigned int firstcpu = hwloc_get_pu_obj_by_os_index(topology,hwloc_bitmap_first(mycpuset))->parent->logical_index; unsigned int cpu; hwloc_bitmap_foreach_begin(cpu,mycpuset); hwloc_obj_t obj = hwloc_get_pu_obj_by_os_index(topology,cpu); if (obj->parent->logical_index-firstcpu >= (unsigned)hwconfig.ncore) { hwloc_bitmap_clr(mycpuset,obj->os_index); if (hwloc_bitmap_iszero(mycpuset)) { GHOST_WARNING_LOG("Ignoring hwconfig setting as it would zero the CPU set!"); hwloc_bitmap_set(mycpuset,obj->os_index); } } if ((int)(obj->sibling_rank) >= hwconfig.nsmt) { hwloc_bitmap_clr(mycpuset,obj->os_index); if (hwloc_bitmap_iszero(mycpuset)) { GHOST_WARNING_LOG("Ignoring hwconfig setting as it would zero the CPU set!"); hwloc_bitmap_set(mycpuset,obj->os_index); } } hwloc_bitmap_foreach_end(); ghost_pumap_create(mycpuset); ghost_rand_create(); #ifdef GHOST_INSTR_LIKWID likwid_markerInit(); if (ghost_tasking_enabled()) { ghost_task *t; ghost_task_create(&t,GHOST_TASK_FILL_ALL,0,&likwidThreadInitTask,NULL,GHOST_TASK_DEFAULT, NULL, 0); ghost_task_enqueue(t); ghost_task_wait(t); ghost_task_destroy(t); } else { #pragma omp parallel likwid_markerThreadInit(); } #endif hwloc_bitmap_free(cudaOccupiedCpuset); hwloc_bitmap_free(mycpuset); mycpuset = NULL; hwloc_bitmap_free(availcpuset); availcpuset = NULL; GHOST_FUNC_EXIT(GHOST_FUNCTYPE_SETUP); return GHOST_SUCCESS; } ghost_error ghost_finalize() { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_TEARDOWN); static int finalized = 0; if (finalized) { return GHOST_SUCCESS; } else { finalized = 1; } if (ghost_autogen_missing()) { GHOST_PERFWARNING_LOG("Found missing autogenerated kernels! Please re-configure GHOST using:\ncmake . 
%s",ghost_autogen_string()); } ghost_rand_destroy(); #ifdef GHOST_INSTR_LIKWID likwid_markerClose(); #endif #ifdef GHOST_INSTR_TIMING #if GHOST_VERBOSITY // char *str; // ghost_timing_summarystring(&str); // GHOST_INFO_LOG("\n%s",str); // free(str); #endif #endif ghost_mpi_datatypes_destroy(); ghost_mpi_operations_destroy(); ghost_taskq_waitall(); ghost_taskq_destroy(); ghost_thpool_destroy(); ghost_pumap_destroy(); ghost_topology_destroy(); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_TEARDOWN); ghost_timing_destroy(); ghost_instr_destroy(); #ifdef GHOST_HAVE_MPI if (!MPIwasInitialized) { MPI_Finalize(); } #endif // needs to be done _after_ MPI_Finalize() (for GPUdirect) ghost_cu_finalize(); return GHOST_SUCCESS; } ghost_error ghost_string(char **str) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_CALL_RETURN(ghost_malloc((void **)str,1)); memset(*str,'\0',1); ghost_header_string(str,"%s", GHOST_NAME); ghost_line_string(str,"Version",NULL,"%s",GHOST_VERSION); ghost_line_string(str,"Build date",NULL,"%s",__DATE__); ghost_line_string(str,"Build time",NULL,"%s",__TIME__); #ifdef GHOST_BUILD_MIC ghost_line_string(str,"MIC kernels",NULL,"Enabled"); #else ghost_line_string(str,"MIC kernels",NULL,"Disabled"); #endif #ifdef GHOST_BUILD_AVX ghost_line_string(str,"AVX kernels",NULL,"Enabled"); #else ghost_line_string(str,"AVX kernels",NULL,"Disabled"); #endif #ifdef GHOST_BUILD_SSE ghost_line_string(str,"SSE kernels",NULL,"Enabled"); #else ghost_line_string(str,"SSE kernels",NULL,"Disabled"); #endif #ifdef GHOST_HAVE_OPENMP ghost_line_string(str,"OpenMP support",NULL,"Enabled"); #else ghost_line_string(str,"OpenMP support",NULL,"Disabled"); #endif #ifdef GHOST_HAVE_MPI ghost_line_string(str,"MPI support",NULL,"Enabled"); #else ghost_line_string(str,"MPI support",NULL,"Disabled"); #endif #ifdef GHOST_HAVE_CUDA ghost_line_string(str,"CUDA support",NULL,"Enabled"); #else ghost_line_string(str,"CUDA support",NULL,"Disabled"); #endif #ifdef GHOST_INSTR_LIKWID #ifdef GHOST_INSTR_TIMING ghost_line_string(str,"Instrumentation",NULL,"Likwid+Timing"); #else ghost_line_string(str,"Instrumentation",NULL,"Likwid"); #endif #else #ifdef GHOST_INSTR_TIMING ghost_line_string(str,"Instrumentation",NULL,"Timing"); #else ghost_line_string(str,"Instrumentation",NULL,"Disabled"); #endif #endif #ifdef GHOST_IDX64_GLOBAL ghost_line_string(str,"Global index size","bits","64"); #else ghost_line_string(str,"Global index size","bits","32"); #endif #ifdef GHOST_IDX64_LOCAL ghost_line_string(str,"Local index size","bits","64"); #else ghost_line_string(str,"Local index size","bits","32"); #endif ghost_footer_string(str); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; } ghost_error ghost_barrier() { #ifdef GHOST_HAVE_MPI MPI_CALL_RETURN(MPI_Barrier(MPI_COMM_WORLD)); #endif #ifdef GHOST_HAVE_CUDA GHOST_CALL_RETURN(ghost_cu_barrier()); #endif return GHOST_SUCCESS; } ghost_error ghost_cuda_comm_get(ghost_mpi_comm *comm) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); *comm = ghost_cuda_comm; GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; }
shallow_water_residual_based_bdf_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Miguel Maso Sotomayor // #ifndef KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED #define KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED // System includes // External includes // Project includes #include "includes/checks.h" #include "utilities/parallel_utilities.h" #include "utilities/time_discretization.h" #include "solving_strategies/schemes/residual_based_bdf_scheme.h" #include "custom_utilities/flow_rate_slip_utility.h" #include "shallow_water_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ShallowWaterResidualBasedBDFScheme * @ingroup KratosShallowWaterApplication * @brief BDF integration scheme (for dynamic problems) * @details The \f$n\f$ order Backward Differentiation Formula (BDF) method is a two step \f$n\f$ order accurate method. * This scheme is designed to solve a system of the type: * \f[ * \mathbf{M} \frac{du_{n0}}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext} * \f] * @author Miguel Maso Sotomayor */ template<class TSparseSpace, class TDenseSpace> class ShallowWaterResidualBasedBDFScheme : public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( ShallowWaterResidualBasedBDFScheme ); typedef Scheme<TSparseSpace,TDenseSpace> BaseType; typedef typename BaseType::Pointer BaseTypePointer; typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType; typedef typename BDFBaseType::DofsArrayType DofsArrayType; typedef typename BDFBaseType::TSystemMatrixType TSystemMatrixType; typedef typename BDFBaseType::TSystemVectorType TSystemVectorType; typedef typename BDFBaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BDFBaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef ModelPart::NodesContainerType NodesArrayType; typedef typename ModelPart::NodeType NodeType; typedef FlowRateSlipUtility<LocalSystemMatrixType,LocalSystemVectorType,double>FlowRateSlipToolType; ///@} ///@name Life Cycle ///@{ // Constructor explicit ShallowWaterResidualBasedBDFScheme(const std::size_t Order = 2, bool UpdateVelocities = false) : BDFBaseType(Order) , mRotationTool() , mUpdateVelocities(UpdateVelocities) {} // Copy Constructor explicit ShallowWaterResidualBasedBDFScheme(ShallowWaterResidualBasedBDFScheme& rOther) : BDFBaseType(rOther) , mRotationTool() , mUpdateVelocities(rOther.mUpdateVelocities) {} /** * Clone */ BaseTypePointer Clone() override { return BaseTypePointer( new ShallowWaterResidualBasedBDFScheme(*this) ); } // Destructor ~ShallowWaterResidualBasedBDFScheme() override {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Performing the update of the solution within newton iteration * @param rModelPart The model of the problem to solve * @param rDofSet Set of all primary variables * @param rA LHS matrix * @param rDx incremental update of primary variables * @param rb RHS Vector */ void Update( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; mRotationTool.RotateVelocities(rModelPart); mpDofUpdater->UpdateDofs(rDofSet, rDx); mRotationTool.RecoverVelocities(rModelPart); 
BDFBaseType::UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb); if (mUpdateVelocities) UpdateVelocities(rModelPart); KRATOS_CATCH("ShallowWaterResidualBasedBDFScheme.Update"); } /** * @brief Performing the prediction of the solution * @details It predicts the solution for the current step * @param rModelPart The model of the problem to solve * @param rDofSet set of all primary variables * @param rA LHS matrix * @param rDx Incremental update of primary variables * @param rb RHS Vector */ void Predict( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; const double delta_time = rModelPart.GetProcessInfo()[DELTA_TIME]; const int num_nodes = static_cast<int>( rModelPart.Nodes().size() ); const auto it_node_begin = rModelPart.Nodes().begin(); const std::array<const Variable<double>*, 3> var_components = {&MOMENTUM_X, &MOMENTUM_Y, &HEIGHT}; const std::array<const Variable<double>*, 3> accel_components = {&ACCELERATION_X, &ACCELERATION_Y, &VERTICAL_VELOCITY}; #pragma omp parallel for for (int i = 0; i < num_nodes; ++i) { auto it_node = it_node_begin + i; for (std::size_t j = 0; j < 3; ++j) { if (!it_node->IsFixed(*var_components[j])) { double& un0 = it_node->FastGetSolutionStepValue(*var_components[j]); double un1 = it_node->FastGetSolutionStepValue(*var_components[j], 1); double dot_un1 = it_node->FastGetSolutionStepValue(*accel_components[j], 1); un0 = un1 + delta_time * dot_un1; } } UpdateFirstDerivative(it_node); } KRATOS_CATCH("ShallowWaterResidualBasedBDFScheme.Predict"); } /** * @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme. * @param rCurrentElement The element to compute * @param rLHS_Contribution The LHS matrix contribution * @param rRHS_Contribution The RHS vector contribution * @param rEquationId The ID's of the element degrees of freedom * @param rCurrentProcessInfo The current process info instance */ void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo ) override { BDFBaseType::CalculateSystemContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rEquationId, rCurrentProcessInfo); mRotationTool.Rotate(rLHS_Contribution,rRHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(rLHS_Contribution,rRHS_Contribution,rCurrentElement.GetGeometry()); } /** * @brief This function is designed to calculate just the RHS contribution * @param rCurrentElement The element to compute * @param rRHS_Contribution The RHS vector contribution * @param rEquationId The ID's of the element degrees of freedom * @param rCurrentProcessInfo The current process info instance */ void CalculateRHSContribution( Element& rCurrentElement, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo ) override { BDFBaseType::CalculateRHSContribution( rCurrentElement, rRHS_Contribution, rEquationId, rCurrentProcessInfo); mRotationTool.Rotate(rRHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(rRHS_Contribution,rCurrentElement.GetGeometry()); } /** * @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme. 
* @param rCurrentCondition The condition to compute * @param rLHS_Contribution The LHS matrix contribution * @param rRHS_Contribution The RHS vector contribution * @param rEquationId The ID's of the element degrees of freedom * @param rCurrentProcessInfo The current process info instance */ void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo ) override { BDFBaseType::CalculateSystemContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rEquationId, rCurrentProcessInfo); mRotationTool.Rotate(rLHS_Contribution,rRHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(rLHS_Contribution,rRHS_Contribution,rCurrentCondition.GetGeometry()); } /** * @brief This function is designed to calculate just the RHS contribution * @param rCurrentCondition The condition to compute * @param rRHS_Contribution The RHS vector contribution * @param rEquationId The ID's of the element degrees of freedom * @param rCurrentProcessInfo The current process info instance */ void CalculateRHSContribution( Condition& rCurrentCondition, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo ) override { BDFBaseType::CalculateRHSContribution( rCurrentCondition, rRHS_Contribution, rEquationId, rCurrentProcessInfo); mRotationTool.Rotate(rRHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(rRHS_Contribution,rCurrentCondition.GetGeometry()); } /* * @brief Free memory allocated by this class. */ void Clear() override { this->mpDofUpdater->Clear(); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. 
std::string Info() const override { return "ShallowWaterResidualBasedBDFScheme"; } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); FlowRateSlipToolType mRotationTool; bool mUpdateVelocities; ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief Updating first time derivative * @param itNode the node iterator */ void UpdateFirstDerivative(NodesArrayType::iterator itNode) override { array_1d<double, 3>& dot_un0 = itNode->FastGetSolutionStepValue(ACCELERATION); double& dot_hn0 = itNode->FastGetSolutionStepValue(VERTICAL_VELOCITY); noalias(dot_un0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(MOMENTUM); dot_hn0 = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(HEIGHT); for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) { noalias(dot_un0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(MOMENTUM, i_order); dot_hn0 += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(HEIGHT, i_order); } } /** * @brief Updating second time derivative * @param itNode the node iterator */ void UpdateSecondDerivative(NodesArrayType::iterator itNode) override {} /** * @brief Updating the velocities * @param rModelPart The model part to compute */ void UpdateVelocities(ModelPart& rModelPart) { block_for_each(rModelPart.Nodes(), [&](NodeType& r_node){ auto& vel = r_node.FastGetSolutionStepValue(VELOCITY); const auto& q = r_node.FastGetSolutionStepValue(MOMENTUM); const auto& h = r_node.FastGetSolutionStepValue(HEIGHT); vel = q / h; }); } /** * @brief It adds the dynamic LHS contribution of the elements * @param rLHS_Contribution The dynamic contribution for the LHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ void AddDynamicsToLHS( LocalSystemMatrixType& rLHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) override { // Adding mass contribution to the dynamic stiffness if (rM.size1() != 0) { // if M matrix declared noalias(rLHS_Contribution) += rM * BDFBaseType::mBDF[0]; } } /** * @brief It adds the dynamic RHS contribution of the elements * @param rElement The element to compute * @param rRHS_Contribution The dynamic contribution for the RHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ void AddDynamicsToRHS( Element& rElement, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) override { const auto& r_const_element = rElement; const std::size_t this_thread = OpenMPUtils::ThisThread(); // Adding inertia contribution if (rM.size1() != 0) { r_const_element.GetFirstDerivativesVector(BDFBaseType::mVector.dotun0[this_thread], 0); noalias(rRHS_Contribution) -= prod(rM, BDFBaseType::mVector.dotun0[this_thread]); } } /** * @brief It adds the dynamic RHS contribution of the condition * @param rCondition The condition to compute * @param rRHS_Contribution The dynamic contribution for the RHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ void AddDynamicsToRHS( Condition& rCondition, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, 
LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) override { const auto& r_const_condition = rCondition; const std::size_t this_thread = OpenMPUtils::ThisThread(); // Adding inertia contribution if (rM.size1() != 0) { r_const_condition.GetFirstDerivativesVector(BDFBaseType::mVector.dotun0[this_thread], 0); noalias(rRHS_Contribution) -= prod(rM, BDFBaseType::mVector.dotun0[this_thread]); } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} }; // Class ShallowWaterResidualBasedBDFScheme ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // Namespace Kratos #endif // KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED defined
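// Illustration (not from the Kratos sources): UpdateFirstDerivative above evaluates
// \dot{u}^{n} \approx \sum_{i=0}^{p} c_i \, u^{n-i} with c_i = mBDF[i]. For the
// default Order = 2 and a constant time step \Delta t these coefficients are the
// standard BDF2 formula:
//   \dot{u}^{n} \approx \frac{3u^{n} - 4u^{n-1} + u^{n-2}}{2\,\Delta t},
//   i.e. c_0 = \tfrac{3}{2\Delta t},\; c_1 = -\tfrac{4}{2\Delta t},\; c_2 = \tfrac{1}{2\Delta t}.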
GB_unaryop__ainv_int16_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_uint16 // op(A') function: GB_tran__ainv_int16_uint16 // C type: int16_t // A type: uint16_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_uint16 ( int16_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
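/* Illustration (plain C, not part of GraphBLAS): GB_CAST_OP above casts the
 * unsigned input to int16_t before negating, so values above INT16_MAX wrap
 * first. The conversion of out-of-range values is implementation-defined in C,
 * but on the usual two's-complement targets: */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t a[] = { 1u, 32767u, 32768u, 65535u };
    for (int i = 0; i < 4; i++) {
        int16_t x = (int16_t) a[i];  /* GB_CASTING: 1, 32767, -32768, -1 */
        int16_t c = (int16_t) -x;    /* GB_OP: -1, -32767, -32768 (negation wraps), 1 */
        printf("aij=%u -> cij=%d\n", (unsigned) a[i], (int) c);
    }
    return 0;
}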
mat.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <omp.h> #include "../include/mat.h" #include "../include/helper.h" #include "../include/parallel_for.h" mat_t* new_mat(int m, int n, int mode) { mat_t *mat = (mat_t*)malloc(sizeof(mat_t)); mat->m = m, mat->n = n; mat->mat = (vec_t*)malloc(sizeof(vec_t) * m); for(int i = 0;i < m;i++) { mat->mat[i] = (vec_t)malloc(sizeof(dtype) * n); for(int j = 0;j < n;j++) mat->mat[i][j] = mode ? float_rand(rand_rightBound) : 0; } return mat; } void free_mat(mat_t *matp) { for(int i = 0;i < matp->m;i++) free(matp->mat[i]); free(matp->mat); free(matp); } void print_mat(mat_t *matp) { for(int i = 0;i < matp->m;i++) { for(int j = 0;j < matp->n;j++) printf("%lf ", matp->mat[i][j]); printf("\n"); } } int mat_is_equal(mat_t *mata, mat_t *matb) { if(mata->m != matb->m || mata->n != matb->n) return 0; for(int i = 0;i < mata->m;i++) for(int j = 0;j < mata->n;j++) if(mata->mat[i][j] != matb->mat[i][j]) return 0; return 1; } mat_t* testmul(mat_t *mata, mat_t *matb) { mat_t *matc = NULL; if(mata->n != matb->m) { return matc; } else { matc = new_mat(mata->m, matb->n, 0); } for(int i = 0;i < mata->m;i++) for(int j = 0;j < matb->n;j++) for(int k = 0;k < mata->n;k++) matc->mat[i][j] += mata->mat[i][k] * matb->mat[k][j]; return matc; } mat_t* ompmul(mat_t *mata, mat_t *matb) { mat_t *matc = NULL; if(mata->n != matb->m) { return matc; } else { matc = new_mat(mata->m, matb->n, 0); } int i; #pragma omp parallel for \ default(none) shared(mata, matb, matc) private(i) for(i = 0;i < mata->m;i++) { for(int j = 0;j < matb->n;j++) for(int k = 0;k < mata->n;k++) matc->mat[i][j] += mata->mat[i][k] * matb->mat[k][j]; } return matc; } mat_t* paramul(mat_t *mata, mat_t *matb, int num_threads) { extern mat_t *matc; matc = NULL; if(mata->n != matb->m) { return matc; } else { matc = new_mat(mata->m, matb->n, 0); } parallel_index index; index.start = 0, index.end = mata->m, index.increment = 1; int privateArgs = 0; parallel_for(index, paramul_loop, num_threads, &privateArgs); return matc; } void* paramul_loop(void *privateArgs) { // declare these variables as global extern mat_t *mata, *matb, *matc; parallel_index myIndex = ((parallel_index*)privateArgs)[0]; for(int i = myIndex.start;i < myIndex.end;i += myIndex.increment) { for(int j = 0;j < matb->n;j++) for(int k = 0;k < mata->n;k++) matc->mat[i][j] += mata->mat[i][k] * matb->mat[k][j]; } return NULL; }
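/* Illustration (a sketch, not part of the repository): a minimal driver for the
 * routines above. It assumes the same project headers; the exact comparison in
 * mat_is_equal is safe here because testmul and ompmul accumulate each output
 * element in the same order. */
#include <stdio.h>
#include "../include/mat.h"

int main(void)
{
    mat_t *a = new_mat(256, 128, 1);  /* mode 1: random entries */
    mat_t *b = new_mat(128, 64, 1);
    mat_t *serial = testmul(a, b);
    mat_t *parallel = ompmul(a, b);
    printf("results %s\n", mat_is_equal(serial, parallel) ? "match" : "differ");
    free_mat(a); free_mat(b); free_mat(serial); free_mat(parallel);
    return 0;
}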
GB_unop__ainv_uint64_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_uint64_uint64) // op(A') function: GB (_unop_tran__ainv_uint64_uint64) // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ uint64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_uint64_uint64) ( uint64_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_uint64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
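/* Illustration (stripped-down sketch, not GraphBLAS code): the bitmap branch
 * above applies the operator only where Ab[p] is set. The same pattern in
 * isolation, with hypothetical arrays: */
#include <stdint.h>

void negate_bitmap(uint64_t *Cx, const uint64_t *Ax, const int8_t *Ab, int64_t n)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0; p < n; p++) {
        if (!Ab[p]) continue;   /* entry not stored: leave Cx[p] untouched */
        Cx[p] = -Ax[p];         /* unsigned negation is modular (mod 2^64) */
    }
}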
GB_binop__lt_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lt_fp64 // A.*B function (eWiseMult): GB_AemultB__lt_fp64 // A*D function (colscale): GB_AxD__lt_fp64 // D*A function (rowscale): GB_DxB__lt_fp64 // C+=B function (dense accum): GB_Cdense_accumB__lt_fp64 // C+=b function (dense accum): GB_Cdense_accumb__lt_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_fp64 // C=scalar+B GB_bind1st__lt_fp64 // C=scalar+B' GB_bind1st_tran__lt_fp64 // C=A+scalar GB_bind2nd__lt_fp64 // C=A'+scalar GB_bind2nd_tran__lt_fp64 // C type: bool // A type: double // B,b type: double // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_FP64 || GxB_NO_LT_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lt_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lt_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lt_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lt_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lt_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__lt_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lt_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lt_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
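/* Illustration (plain C, not GraphBLAS code): GB_bind1st and GB_bind2nd above
 * curry one operand of z = (x < y), yielding the two element-wise kernels
 * below (hypothetical arrays): */
#include <stdbool.h>
#include <stdint.h>

void lt_bind1st(bool *Cx, double x, const double *Bx, int64_t n)
{
    for (int64_t p = 0; p < n; p++) Cx[p] = (x < Bx[p]);  /* scalar bound as 1st operand */
}

void lt_bind2nd(bool *Cx, const double *Ax, double y, int64_t n)
{
    for (int64_t p = 0; p < n; p++) Cx[p] = (Ax[p] < y);  /* scalar bound as 2nd operand */
}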
jacobi-ompacc-opt2.c
// Liao, 7/9/2014, add collapse() inside jacobi() // Liao, 1/22/2015, test nested map() clauses supported by device data environment reuse. #include <stdio.h> #include <math.h> #include <assert.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif // Add timing support #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t,(struct timezone*)NULL); time = t.tv_sec + 1.0e-6*t.tv_usec; return time; } double time1, time2; void driver(void); void initialize(void); void jacobi(void); void error_check(void); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * * This C version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve parallelism. * All do loops are parallelized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successive over-relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 512 int n,m,mits; #define REAL float // flexible between float and double REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08; // depending on MSIZE!! REAL tol,relax=1.0,alpha=0.0543; REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE]; REAL dx,dy; // value, reference value, and the number of significant digits to be ensured. double diff_ratio (double val, double ref, int significant_digits) { assert (significant_digits>=1); double diff_ratio = fabs(val - ref )/fabs(ref); double upper_limit = pow (0.1, significant_digits); // 1.0/(double(10^significant_digits)) ; printf("value :%E ref_value: %E diff_ratio: %E upper_limit: %E \n",val, ref, diff_ratio, upper_limit); // ensure the number of the significant digits to be the same assert ( diff_ratio < upper_limit); return diff_ratio; } int main (void) { // float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n=MSIZE; m=MSIZE; tol=0.0000000001; mits=5000; #if 0 // Concurrent CPU and GPU threads are not yet supported #ifdef _OPENMP #pragma omp parallel { #pragma omp single printf("Running using %d threads...\n",omp_get_num_threads()); } #endif #endif driver ( ) ; return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialized. 
* * Working variables/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver( ) { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi (); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n",time2-time1); /* error_check (n,m,alpha,dx,dy,u,f)*/ error_check ( ); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize( ) { int i,j, xx,yy; //double PI=3.1415926; dx = 2.0 / (n-1); dy = 2.0 / (m-1); /* Initialize initial condition and RHS */ //#pragma omp parallel for private(xx,yy,j,i) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx =(int)( -1.0 + dx * (i-1)); yy = (int)(-1.0 + dy * (j-1)) ; u[i][j] = 0.0; f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\ - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy); } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves the Poisson equation on a rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlet boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ void jacobi( ) { REAL omega; int i,j,k; REAL error,resid,ax,ay,b; // double error_local; // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2; // float te1,te2; // float second; omega=relax; /* * Initialize coefficients */ ax = 1.0/(dx*dx); /* X-direction coef */ ay = 1.0/(dy*dy); /* Y-direction coef */ b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; // An optimization on top of naive coding: promoting data handling outside the while loop // data properties may change since the scope is bigger: #pragma omp target data map(to:n, m, omega, ax, ay, b, f[0:n][0:m]) map(tofrom:u[0:n][0:m]) map(alloc:uold[0:n][0:m]) while ((k<=mits)&&(error>tol)) { error = 0.0; /* Copy new solution into old */ //#pragma omp parallel // { #pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m]) #pragma omp parallel for private(j,i) collapse(2) for(i=0;i<n;i++) for(j=0;j<m;j++) uold[i][j] = u[i][j]; #pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m]) #pragma omp parallel for private(resid,j,i) reduction(+:error) collapse(2) // nowait for (i=1;i<(n-1);i++) for (j=1;j<(m-1);j++) { resid = (ax*(uold[i-1][j] + uold[i+1][j])\ + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b; u[i][j] = uold[i][j] - omega * resid; error = error + resid*resid ; } // } /* omp end parallel */ /* Error check */ if (k%500==0) printf("Finished %d iterations with error =%f\n",k, error); error = sqrt(error)/(n*m); k = k + 1; } /* End iteration loop */ printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n", error); printf("Residual_ref :%E\n", resid_ref); printf ("Diff ref=%E\n", fabs(error-resid_ref)); assert (fabs(error-resid_ref) < 1E-13); } /* subroutine error_check 
(n,m,alpha,dx,dy,u,f) implicit none ************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check ( ) { int i,j; REAL xx,yy,temp,error; dx = 2.0 / (n-1); dy = 2.0 / (m-1); error = 0.0 ; //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx = -1.0 + dx * (i-1); yy = -1.0 + dy * (j-1); temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy); error = error + temp*temp; } error = sqrt(error)/(n*m); printf("Solution Error :%E \n",error); printf("Solution Error Ref :%E \n",error_ref); printf ("Diff ref=%E\n", fabs(error-error_ref)); assert (fabs(error-error_ref) < 1E-13); }
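// Illustration (restating the inner loop of jacobi() above in formula form):
// each sweep applies the weighted Jacobi update
//   r_ij = ( ax*(uold[i-1][j] + uold[i+1][j]) + ay*(uold[i][j-1] + uold[i][j+1])
//            + b*uold[i][j] - f[i][j] ) / b,
//   u[i][j] = uold[i][j] - omega * r_ij,
// with ax = 1/dx^2, ay = 1/dy^2 and b = -2/dx^2 - 2/dy^2 - alpha.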
pmesh_util.c
/* This routine does *no* bounds checking. Be careful. */ void _bin_delta_c( double* rho, int* pixel_ind, double* pixel_weight, int* radial_ind, double* radial_weight, double* out, int npart, int npix, int nrad ) { #pragma omp parallel for for (int ipart = 0; ipart < npart; ipart++) { double v = rho[ipart]; for (int ipix = 0; ipix < npix; ipix++) { int ip = ipart * npix + ipix; int pi = pixel_ind[ip]; double pw = pixel_weight[ip]; for (int irad = 0; irad < nrad; irad++) { int ir = ipart * nrad + irad; int ri = radial_ind[ir]; double rw = radial_weight[ir]; if (rw < 0) continue; #pragma omp atomic out[ri * npix + pi] += v * pw * rw; } } } }
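/* Illustration (a sketch with made-up sizes, not part of the project): calling
 * _bin_delta_c directly. Pixel indices must lie in [0, npix) since npix is also
 * the row stride of out, and the routine does no bounds checking. */
#include <stdio.h>

void _bin_delta_c(double*, int*, double*, int*, double*, double*, int, int, int);

int main(void)
{
    double rho[2]   = { 1.0, 2.0 };          /* npart = 2 */
    int    pix_i[4] = { 0, 1, 1, 0 };        /* npart * npix, npix = 2 */
    double pix_w[4] = { 0.5, 0.5, 1.0, 0.0 };
    int    rad_i[2] = { 0, 2 };              /* npart * nrad, nrad = 1 */
    double rad_w[2] = { 1.0, 0.25 };
    double out[3 * 2] = { 0 };               /* 3 radial bins x npix columns */
    _bin_delta_c(rho, pix_i, pix_w, rad_i, rad_w, out, 2, 2, 1);
    printf("out[0]=%g out[5]=%g\n", out[0], out[5]);  /* 0.5 and 0.5 */
    return 0;
}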
linux_printaff.c
#include "typesf2c.h" #ifndef linux Integer linux_printaff_(){ return (Integer) 0; } int linux_setffaff_(){ return (Integer) 0; } #else #ifdef MPI #include <mpi.h> #else #include "ga.h" #include "macdecls.h" #endif #define __USE_GNU #include <stdio.h> #include <unistd.h> #include <string.h> #include <sched.h> #ifdef USE_OPENMP #include <omp.h> #endif #include <sys/syscall.h> #define MXCPUS 1024 unsigned int i, caff[MXCPUS], numaff=0; cpu_set_t mycpuid; int myrank, thread; char hname[128]; Integer linux_printaff_(){ char affbuf[200 + 7*CPU_SETSIZE]; #ifdef MPI MPI_Comm_rank(MPI_COMM_WORLD,&myrank); #else myrank=GA_Nodeid(); #endif memset(affbuf,0, sizeof(affbuf)); memset(hname, 0, sizeof(hname)); (void)gethostname(hname, sizeof(hname)); #ifdef USE_OPENMP #pragma omp parallel private(thread, mycpuid, caff, numaff, affbuf, i) #endif { numaff=0; CPU_ZERO(&mycpuid); (void) sched_getaffinity(0, sizeof(mycpuid), &mycpuid); #ifdef USE_OPENMP thread = omp_get_thread_num(); #else thread = 0; #endif for (i = 0; i < MXCPUS; i++){ if (CPU_ISSET(i, &mycpuid)) { caff[numaff]=i; numaff+=1; } } if(numaff>0) { sprintf(affbuf,"rank %d thread %d host %s bind to %d CPUs:", myrank, thread, hname, numaff); for (i = 0; i < numaff; i++){ sprintf(affbuf + strlen(affbuf)," %i ", caff[i]); } } #ifdef USE_OPENMP #pragma omp barrier #endif puts(affbuf); fflush(stdout); } return (Integer) 0; } Integer linux_unsetaff_(){ #ifdef MPI MPI_Comm_rank(MPI_COMM_WORLD,&myrank); #else myrank=GA_Nodeid(); #endif CPU_ZERO(&mycpuid); for (i = 0; i < MXCPUS; i++){ CPU_SET(i, &mycpuid); } if (sched_setaffinity(0, sizeof(mycpuid), &mycpuid) < 0) { perror("sched_getaffinity"); return (Integer) -1; } return (Integer) 0; } #endif /* $Id$ */
smica.c
/* * smica.c * lowly_project * * Created by Karim Benabed on 30/10/09. * Copyright 2009 Institut d'Astrophysique de Paris. All rights reserved. * */ #include "smica.h" double smica_crit_classic(void *vsmic,error **err); double kld(int n, double* rq_hat, double* rq, error **err); void printMat(double* A, int n, int m) { int im,in; for(in=0;in<n;in++) { for(im=0;im<m-1;im++) { fprintf(stderr,"%g , ",A[in*m+im]); } fprintf(stderr,"%g\n",A[in*m+m-1]); } } // General funcs Smica* Smica_init(int nq, double *wq, int m, double *rq_hat, double* rq_0, int nc, SmicaComp **SC,error **err) { Smica* smic; int isc,iq,info; int trois; char uplo,diag; //_DEBUGHERE_("",""); smic = malloc_err(sizeof(Smica),err); forwardError(*err,__LINE__,NULL); //_DEBUGHERE_("",""); smic->nq = nq; smic->m = m; trois = 3; if(rq_0 == NULL) { trois = 2; } //_DEBUGHERE_("",""); smic->rq_hat = malloc_err(sizeof(double)*(trois*nq+1)*m*m, err); forwardError(*err,__LINE__,NULL); memcpy(smic->rq_hat,rq_hat,sizeof(double)*m*m*nq); smic->z_buf = smic->rq_hat + m*m*nq; smic->rq = smic->z_buf + m*m; //_DEBUGHERE_("",""); if (rq_0!=NULL) { smic->rq_0 = smic->rq + m*m*nq; memcpy(smic->rq_0,rq_0,sizeof(double)*m*m*nq); //_DEBUGHERE_("",""); } else { smic->rq_0 = NULL; //_DEBUGHERE_("",""); } //_DEBUGHERE_("",""); smic->nc = nc; smic->SC = malloc_err(sizeof(SmicaComp*)*nc, err); forwardError(*err,__LINE__,NULL); //_DEBUGHERE_("",""); smic->offset_nc = malloc_err(sizeof(int)*nc, err); forwardError(*err,__LINE__,NULL); //_DEBUGHERE_("",""); smic->offset_nc[0] = 0; smic->SC[0] = SC[0]; //_DEBUGHERE_("",""); for(isc=1;isc<nc;isc++) { //_DEBUGHERE_("",""); smic->offset_nc[isc] = smic->offset_nc[isc-1] + smic->SC[isc-1]->ndim; testErrorRetVA(m!=SC[isc]->m,smica_uncomp,"incompatible number of bands in component %d (got %d expected %d)",*err,__LINE__,NULL,isc,SC[isc]->m,m); testErrorRetVA(nq!=SC[isc]->nq,smica_uncomp,"incompatible number of bins in component %d (got %d expected %d)",*err,__LINE__,NULL,isc,SC[isc]->nq,nq); //_DEBUGHERE_("",""); smic->SC[isc] = SC[isc]; } //_DEBUGHERE_("",""); smic->wq = malloc_err(sizeof(double)*nq,err); forwardError(*err,__LINE__,NULL); //_DEBUGHERE_("",""); if (wq==NULL) { for(iq=0;iq<nq;iq++) { //_DEBUGHERE_("",""); smic->wq[iq] = 1; } } else { //_DEBUGHERE_("",""); memcpy(smic->wq,wq,sizeof(double)*nq); } //_DEBUGHERE_("",""); smic->crit = &smica_crit_classic; smic->crit_classic_init = 0; smic->crit_cor = NULL; smic->gvec = NULL; smic->quad_mask = NULL; /* must be initialized: free_Smica tests this pointer */ smic->eig_buf = NULL; smic->eig_lwork = 0; smic->eig_nrm = 0; smic->lkl_data = NULL; smic->lkl_data_free = NULL; smic->cnt=0; return smic; } void free_Smica(void **psmic) { Smica *smic; int isc; smic = *psmic; free(smic->wq); free(smic->rq_hat); free(smic->offset_nc); for(isc=0;isc<smic->nc;isc++) { if (smic->SC[isc]->names!=NULL) { free(smic->SC[isc]->names); } if (smic->SC[isc]->free!=NULL) { smic->SC[isc]->free((void**)&(smic->SC[isc])); } } free(smic->SC); if (smic->gvec!=NULL) { free(smic->gvec); } if (smic->quad_mask!=NULL) { free(smic->quad_mask); } if (smic->crit_cor!=NULL) { free(smic->crit_cor); } if (smic->eig_buf!=NULL) { free(smic->eig_buf); free(smic->eig_nrm); } if (smic->lkl_data_free!=NULL) { smic->lkl_data_free(&(smic->lkl_data)); } free(smic); *psmic=NULL; } void Smica_fg(void* vsmic, double* pars, double* fgvec, error **err) { int isc; Smica *smic; double res; int iq,i,j; int iv; smic = vsmic; // init rq matrix memset(smic->rq,0,sizeof(double)*smic->m*smic->m*smic->nq); // update rq matrix according to each component for(isc=0;isc<smic->nc;isc++) { char nn[40]; if 
(smic->SC[isc]->isfg==0) { //_DEBUGHERE_("jump %d",isc); continue; } //_DEBUGHERE_("comp %d update (off %d)",isc,smic->offset_nc[isc]); //printMat(smic->rq, smic->m, smic->m); //_DEBUGHERE_("%g",*(pars+smic->offset_nc[isc])); smic->SC[isc]->update(smic->SC[isc],pars+smic->offset_nc[isc], smic->rq, err); forwardError(*err,__LINE__,); //sprintf(nn,"pq_%d.la",isc); //write_bin_vector(smic->rq, nn, sizeof(double)*(smic->nq*smic->m*smic->m), err); //forwardError(*err,__LINE__,); //_DEBUGHERE_("comp %d update done",isc); //printMat(smic->rq, smic->m, smic->m); } //symmetrize matrix for (iq=0;iq<smic->nq;iq++) { double *rq; rq = smic->rq+iq*smic->m*smic->m; for(i=0;i<smic->m;i++) { for (j=0;j<i;j++) { rq[j+i*smic->m] = rq[j*smic->m+i]; } } } //write_bin_vector(smic->rq, "rq.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); //write_bin_vector(smic->rq_hat, "rqhat.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); // here, compute the likelihood from rq and rq_hat testErrorRet(smic->crit!=&smica_crit_gauss,-24324,"not implemented",*err,__LINE__,); for (iv=0;iv<smic->quad_sn;iv++) { int civ; fgvec[iv] = smic->rq[smic->quad_mask[iv]]; } //_DEBUGHERE_("%g %g %g %g",fgvec[0],fgvec[1],fgvec[smic->quad_sn-2],fgvec[smic->quad_sn-1]); } void Smica_data(void* vsmic, double* fgvec, error **err) { int isc; Smica *smic; double res; int iq,i,j; int iv; smic = vsmic; testErrorRet(smic->crit!=&smica_crit_gauss,-24324,"not implemented",*err,__LINE__,); for (iv=0;iv<smic->quad_sn;iv++) { int civ; fgvec[iv] = smic->rq_hat[smic->quad_mask[iv]]; } //_DEBUGHERE_("%g %g %g %g",fgvec[0],fgvec[1],fgvec[smic->quad_sn-2],fgvec[smic->quad_sn-1]); } int Smica_vecsize(void* vsmic, error **err) { int isc; Smica *smic; double res; int iq,i,j; int iv; smic = vsmic; //_DEBUGHERE_("%p %p %p",smic,smic->crit,&smica_crit_gauss); testErrorRet(smic->crit!=&smica_crit_gauss,-24324,"not implemented",*err,__LINE__,0); return smic->quad_sn; } void Smica_gcal(void* vsmic, double* pars, double* fgvec, error **err) { int isc; Smica *smic; double res; int iq,i,j; int iv; smic = vsmic; // init rq matrix for(i=0;i<smic->nq*smic->m*smic->m;i++) { smic->rq[i] = 1; } // update rq matrix according to each component for(isc=0;isc<smic->nc;isc++) { char nn[40]; if (smic->SC[isc]->ismul==0) { //_DEBUGHERE_("jump %d",isc); continue; } //_DEBUGHERE_("comp %d update (off %d)",isc,smic->offset_nc[isc]); //printMat(smic->rq, smic->m, smic->m); //_DEBUGHERE_("%g",*(pars+smic->offset_nc[isc])); smic->SC[isc]->update(smic->SC[isc],pars+smic->offset_nc[isc], smic->rq, err); forwardError(*err,__LINE__,); //sprintf(nn,"pq_%d.la",isc); //write_bin_vector(smic->rq, nn, sizeof(double)*(smic->nq*smic->m*smic->m), err); //forwardError(*err,__LINE__,-1); //_DEBUGHERE_("comp %d update done",isc); //printMat(smic->rq, smic->m, smic->m); } //symmetrize matrix for (iq=0;iq<smic->nq;iq++) { double *rq; rq = smic->rq+iq*smic->m*smic->m; for(i=0;i<smic->m;i++) { for (j=0;j<i;j++) { rq[j+i*smic->m] = rq[j*smic->m+i]; } } } //write_bin_vector(smic->rq, "rq.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); //write_bin_vector(smic->rq_hat, "rqhat.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); // here, compute the likelihood from rq and rq_hat testErrorRet(smic->crit!=&smica_crit_gauss,-24324,"not implemented",*err,__LINE__,); for (iv=0;iv<smic->quad_sn;iv++) { int civ; fgvec[iv] = smic->rq[smic->quad_mask[iv]]; } //_DEBUGHERE_("%g %g %g %g",fgvec[0],fgvec[1],fgvec[smic->quad_sn-2],fgvec[smic->quad_sn-1]); } //#define 
TIMER_MSEC(TIMER_after,TIMER_before) (((TIMER_after.tv_sec-TIMER_before.tv_sec)*1000000+(TIMER_after.tv_usec-TIMER_before.tv_usec))/1000) double Smica_lkl(void* vsmic, double* pars, error **err) { int isc; Smica *smic; double res; int iq,i,j; //struct timeval starttime1,starttime2,starttime3,starttime4,endtime; smic = vsmic; //gettimeofday(&starttime1,NULL); // init rq matrix if (smic->rq_0 == NULL) { memset(smic->rq,0,sizeof(double)*smic->m*smic->m*smic->nq); } else { memcpy(smic->rq,smic->rq_0,sizeof(double)*smic->m*smic->m*smic->nq); } //gettimeofday(&starttime2,NULL); // update rq matrix according to each component for(isc=0;isc<smic->nc;isc++) { char nn[40]; //_DEBUGHERE_("comp %d update (off %d)",isc,smic->offset_nc[isc]); //printMat(smic->rq, smic->m, smic->m); //_DEBUGHERE_("%g",*(pars+smic->offset_nc[isc])); smic->SC[isc]->update(smic->SC[isc],pars+smic->offset_nc[isc], smic->rq, err); forwardError(*err,__LINE__,0); //sprintf(nn,"pq_%d.la",isc); //write_bin_vector(smic->rq, nn, sizeof(double)*(smic->nq*smic->m*smic->m), err); //forwardError(*err,__LINE__,-1); //_DEBUGHERE_("comp %d update done",isc); //printMat(smic->rq, smic->m, smic->m); } //symetrize matrix //gettimeofday(&starttime3,NULL); for (iq=0;iq<smic->nq;iq++) { double *rq; rq = smic->rq+iq*smic->m*smic->m; for(i=0;i<smic->m;i++) { for (j=0;j<i;j++) { rq[j+i*smic->m] = rq[j*smic->m+i]; } } } //write_bin_vector(smic->rq, "rq.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); //write_bin_vector(smic->rq_hat, "rqhat.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); // ici calculer la vraissemblance a partir de rq et rq_hat //gettimeofday(&starttime4,NULL); res = smic->crit(smic,err); forwardError(*err,__LINE__,0); //gettimeofday(&endtime,NULL); //testErrorRet(smic->cnt==1,-24324,"not implemented",*err,__LINE__,0); smic->cnt+=1; //_DEBUGHERE_("init %ld, update %ld, sym %ld,crit %ld, total %ld",TIMER_MSEC(starttime2,starttime1),TIMER_MSEC(starttime3,starttime2),TIMER_MSEC(starttime4,starttime3),TIMER_MSEC(endtime,starttime4),TIMER_MSEC(endtime,starttime1)); return res; } //gaussian approx criterion double smica_crit_gauss(void *vsmic, error **err) { int iq,im1,im2,iv,m2,m; double res,les; Smica *smic; char uplo; double done,dzero; int one,i,j; //struct timeval starttime1, starttime2,starttime3, endtime; smic = vsmic; //_DEBUGHERE_("%d %d",smic->nq,smic->m); //write_bin_vector(smic->rq, "rq.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); //forwardError(*err,__LINE__,0); //write_bin_vector(smic->rq_hat, "rq_hat.dat", sizeof(double)*(smic->nq*smic->m*smic->m), err); //forwardError(*err,__LINE__,0); // reorganize data //write_bin_vector(smic->quad_mask, "quad_mask.dat", sizeof(int)*(smic->quad_sn), err); //gettimeofday(&starttime1,NULL); m = smic->m; m2 = m*m; iv = 0; for (iv=0;iv<smic->quad_sn;iv++) { int civ; smic->gvec[iv] = smic->rq[smic->quad_mask[iv]] - smic->rq_hat[smic->quad_mask[iv]]; civ = smic->quad_mask[iv]; //_DEBUGHERE_("%d (%d %d %d) %d %g %g %g",iv,civ/m2,(civ-(civ/m2)*m2)/m,(civ-(civ/m2)*m2)%m, smic->quad_mask[iv],smic->rq[smic->quad_mask[iv]], smic->rq_hat[smic->quad_mask[iv]],smic->gvec[iv]); } //_DEBUGHERE_("%d",iv); //gettimeofday(&starttime2,NULL); //one = 1; //done = 1; //dzero = 0; //uplo = 'L'; //printMat(smic->crit_cor,smic->quad_sn,smic->quad_sn); //_DEBUGHER_("%d",smic->quad_sn); //write_bin_vector(smic->gvec, "gvec.dat", sizeof(double)*(smic->quad_sn), err); //write_bin_vector(smic->crit_cor, "crit_cor.dat", sizeof(double)*(smic->quad_sn)*(smic->quad_sn), err); //_DEBUGHERE_("",""); 
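  // The parallel loop below evaluates the quadratic form gvec^T * crit_cor * gvec
  // without forming the intermediate vector: since crit_cor is symmetric, the
  // form is accumulated row by row as
  //   res = sum_i gvec[i] * ( C[i][i]*gvec[i] + 2 * sum_{j>i} C[i][j]*gvec[j] ),
  // so each off-diagonal entry is touched only once; the OpenMP reduction sums
  // the per-row contributions, and dynamic scheduling balances the shrinking
  // inner loops across threads.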
  res = 0;
  int qsn = smic->quad_sn;
  double *crit_cor = smic->crit_cor;
  double *gvec = smic->gvec;

#pragma omp parallel for private(les,i,j) reduction(+:res) firstprivate(crit_cor,gvec) schedule(dynamic,512)
  for(i=0;i<qsn;i++) {
    les = crit_cor[i*qsn+i]*gvec[i];
    for(j=i+1;j<qsn;j++) {
      les += 2*gvec[j]*crit_cor[i*qsn+j];
    }
    res += les*gvec[i];
  }

  return -.5*res;
}

void smica_set_crit_gauss(Smica *smic, double *crit_cor, int *mask, int *ordering, error **err) {
  int iv,jv,iq;
  int nv;

  smic->quad_mask = malloc_err(sizeof(int)*smic->nq*smic->m*smic->m,err);
  forwardError(*err,__LINE__,);

  // the mask runs over both spectra and ells !!!
  nv = 0;
  if (ordering == NULL) {
    for (iv=0;iv<smic->m;iv++) {
      for (jv=iv;jv<smic->m;jv++) {
        for (iq=0;iq<smic->nq;iq++) {
          if (mask == NULL || mask[iq*smic->m*smic->m+iv*smic->m+jv]!=0) {
            smic->quad_mask[nv] = iq*smic->m*smic->m+iv*smic->m+jv;
            nv++;
          }
        }
      }
    }
  } else {
    int ic;
    for (ic=0;ic<(smic->m*(smic->m+1))/2;ic++) {
      iv = ordering[ic*2];
      jv = ordering[ic*2+1];
      for (iq=0;iq<smic->nq;iq++) {
        if (mask == NULL || mask[iq*smic->m*smic->m+iv*smic->m+jv]!=0) {
          smic->quad_mask[nv] = iq*smic->m*smic->m+iv*smic->m+jv;
          nv++;
        }
      }
    }
  }

  smic->gvec = malloc_err(sizeof(double)*nv*2,err);
  forwardError(*err,__LINE__,);
  smic->quad_sn = nv;

  smic->crit_cor = malloc_err(sizeof(double)*nv*nv,err);
  forwardError(*err,__LINE__,);
  memcpy(smic->crit_cor,crit_cor,sizeof(double)*nv*nv);

  smic->crit = &smica_crit_gauss;
}

// Simple components

// constant
void comp_cst_update(void* data, double* locpars, double* rq, error **err) {
  SmicaComp *SC;
  double *rq_0;
  int sz,one;
  double done;

  SC = data;
  rq_0 = SC->data;
  sz = SC->m*SC->m*SC->nq;
  done = 1;
  one = 1;
  // -> rq = 1 * rq_0 + rq
  daxpy(&sz,&done,rq_0,&one, rq, &one);
}

void free_comp_cst(void** data) {
  SmicaComp *SC;
  SC = *data;
  free(SC->data);
  free(SC);
  *data = NULL;
}

SmicaComp* comp_cst_init(int nq, int m, double *rq_0, error **err) {
  SmicaComp *SC;
  double *data;

  data = malloc_err(sizeof(double)*m*m*nq,err);
  forwardError(*err,__LINE__,NULL);
  memcpy(data,rq_0,sizeof(double)*m*m*nq);

  SC = alloc_SC(0,nq,m,data,&comp_cst_update,&free_comp_cst,err);
  forwardError(*err,__LINE__,NULL);

  return SC;
}

// 1D
void comp_1D_AAt(int m, double *A, double *AAt, error **err) {
  int mm,one;
  char uplo;
  double done;

  memset(AAt,0,m*m*sizeof(double));
  mm = m;
  uplo = 'L';
  done = 1;
  one = 1;
  // -> AAt = 1 * A * A' + AAt
  dsyr(&uplo, &mm, &done, A, &one, AAt, &mm);
}

SmicaComp* alloc_SC(int ndim, int nq, int m, void* data, update_rq* update, posterior_log_free* pfree, error **err) {
  SmicaComp* SC;

  SC = malloc_err(sizeof(SmicaComp), err);
  forwardError(*err,__LINE__,NULL);
  SC->m = m;
  SC->nq = nq;
  SC->ndim = ndim;
  SC->update = update;
  SC->data = data;
  SC->free = pfree;
  SC->names = NULL;
  SC_set_compname(SC,"UNK");
  SC->isfg = 0;
  SC->ismul = 0;
  return SC;
}

void SC_set_compname(SmicaComp *SC, char *name) {
  sprintf(SC->comp_name,"%s",name);
}

void SC_isfg(SmicaComp *SC) {
  SC->isfg = 1;
}

void SC_ismul(SmicaComp *SC) {
  SC->ismul = 1;
}

void SC_setnames(SmicaComp *SC, char** names, error **err) {
  int i;

  if (SC->names!=NULL) {
    free(SC->names);
  }
  if (SC->ndim!=0) {
    SC->names = malloc_err(sizeof(_smicanames)*SC->ndim,err);
    forwardError(*err,__LINE__,);
  } else {
    SC->names = malloc_err(sizeof(_smicanames)*1,err);
    forwardError(*err,__LINE__,);
  }
  for(i=0;i<SC->ndim;i++) {
    sprintf(SC->names[i],"%s",names[i]);
  }
}

SmicaComp* comp_1D_init(int nq, int m, double *A, error **err) {
  SmicaComp *SC;
  SC_1D_data *data;
  int ndim;

  data = malloc_err(sizeof(SC_1D_data), err);
  forwardError(*err,__LINE__,NULL);
  data->AAt = malloc_err(sizeof(double)*m*m, err);
  forwardError(*err,__LINE__,NULL);

  if (A!=NULL) {
    data->Acst = 1;
    ndim = nq;
    comp_1D_AAt(m, A, data->AAt, err);
    forwardError(*err,__LINE__,NULL);
  } else {
    data->Acst = 0;
    ndim = nq + m;
  }

  SC = alloc_SC(ndim,nq,m,data,&comp_1D_update,&free_comp_1D,err);
  forwardError(*err,__LINE__,NULL);

  return SC;
}

void free_comp_1D(void** data) {
  SmicaComp *SC;
  SC = *data;
  free(((SC_1D_data*) SC->data)->AAt);
  free(SC->data);
  free(SC);
  *data = NULL;
}

void comp_1D_update(void* data, double* locpars, double* rq, error **err) {
  int iq,one;
  SmicaComp *SC;
  double *AAt;
  int m,m2;
  double *mpars;
  SC_1D_data *SCdat;

  SC = data;
  m = SC->m;
  SCdat = SC->data;
  AAt = SCdat->AAt;
  m2 = m*m;
  one = 1;

  mpars = locpars;
  if (SCdat->Acst==0) {
    double *A;
    A = locpars;
    mpars = locpars + m;
    comp_1D_AAt(m,A,AAt, err);
    forwardError(*err,__LINE__,);
  }

  // loop over the nq bins (not ndim: when A is free, ndim = nq + m and the
  // first m entries of locpars hold A, not bin amplitudes)
  for(iq=0;iq<SC->nq;iq++) {
    // -> rq[iq*m2] = mpars[iq] * AAt + rq[iq*m2]
    daxpy(&m2,mpars + iq,AAt,&one, rq+m2*iq, &one);
  }
}

// nD
SmicaComp* comp_nD_init(int nq, int m, int nd, double *A, error **err) {
  SmicaComp *SC;
  int ndim;
  void* data;

  data = malloc_err(sizeof(SC_nD_data), err);
  forwardError(*err,__LINE__,NULL);
  ((SC_nD_data*) data)->nd = nd;
  ((SC_nD_data*) data)->A = malloc_err(sizeof(double)*(m*nd*2+nd*nd), err);
  forwardError(*err,__LINE__,NULL);

  if (A!=NULL) {
    ndim = nq*(nd*(nd+1))/2;
    ((SC_nD_data*) data)->Acst = 1;
    memcpy(((SC_nD_data*) data)->A,A,m*nd*sizeof(double));
  } else {
    ndim = m*nd+nq*(nd*(nd+1))/2;
    ((SC_nD_data*) data)->Acst = 0;
  }

  ((SC_nD_data*) data)->Ab = ((SC_nD_data*) data)->A + m*nd;
  ((SC_nD_data*) data)->P = ((SC_nD_data*) data)->Ab + m*nd;

  // beware A is C oriented
  SC = alloc_SC(ndim,nq,m,data,&comp_nD_update,&free_comp_nD,err);
  forwardError(*err,__LINE__,NULL);

  return SC;
}

void free_comp_nD(void** data) {
  SmicaComp *SC;
  SC = *data;
  free(((SC_nD_data*) SC->data)->A);
  free(SC->data);
  free(SC);
//_DEBUGHERE_("",""); *data = NULL; } void comp_nD_update(void* data,double* locpars, double* rq, error **err) { int iq,one; SmicaComp *SC; double *A,*Ab,*P,*Ppack; int m,m2,nd,nq; double done,dzero; double *mpars; char transa,transb,side,uplo; // locpar is a (C oriented) q*nd*nd U Triangular matrix SC = data; m = SC->m; nq = SC->nq; nd = ((SC_nD_data*) SC->data)->nd; Ab = ((SC_nD_data*) SC->data)->Ab; P = ((SC_nD_data*) SC->data)->P; if (((SC_nD_data*) SC->data)->Acst==1) { A = ((SC_nD_data*) SC->data)->A; mpars = locpars; } else { A = locpars; mpars = locpars + m*nd; } m2 = m*m; one = 1; done = 1; dzero = 0; transb = 'N'; // because A is C ordered for(iq=0;iq<nq;iq++) { int ii; int ix,iy; // unpack locpar[q] Ppack = locpars + iq*((nd*(nd+1))/2); ii = 0; for(ix=0;ix<nd;ix++) { for(iy=ix;iy<nd;iy++) { P[ix*nd+iy] = Ppack[ii]; //P[iy*nd+ix] = Ppack[ii]; //P[ix*nd+iy] = 0; //P[iy*nd+ix] = 0; ii++; } //P[ix*nd+ix] = 1; } //printMat(P,2,2); //_DEBUGHERE_("P %d",iq); //printMat(P, nd, nd); //_DEBUGHERE_("A",""); //printMat(A, m, nd); //_DEBUGHERE_("P.A'",""); //printMat(Ab, m, nd); transa = 'N'; side = 'L'; uplo = 'L'; // Ab = P.A' (Ab is fortran ordered while A is C ordered) dsymm(&side, &uplo, &nd, &m, &done, P, &nd, A, &nd, &dzero, Ab, &nd); //dgemm(&transa, &transb, &nd, &m, &nd, &done, P, &nd, A, &nd, &dzero, Ab, &nd); //_DEBUGHERE_("P %d",iq); //printMat(P, nd, nd); //_DEBUGHERE_("A",""); //printMat(A, m, nd); //_DEBUGHERE_("P.A'",""); //printMat(Ab, m, nd); // Rq += A.Ab (=A.P.A') transa = 'T'; // because A is c ordered //_DEBUGHERE_("rq a",""); //printMat(rq+m2*iq, m, m); dgemm(&transa, &transb, &m, &m, &nd, &done, A, &nd, Ab, &nd, &done, rq+m2*iq, &m); //_DEBUGHERE_("rq b",""); //printMat(rq+m2*iq, m, m); //printMat(rq+m2*iq,2,2); } } // CMB SmicaComp * comp_CMB_init(int nbins, int mt,int mp, int *has_cl, double* Acprs, error **err) { double *A; SC_CMB_data* data; int mtot; int trois,im,i,six; SmicaComp *SC; data = malloc_err(sizeof(SC_CMB_data), err); forwardError(*err,__LINE__,NULL); trois = has_cl[0]+has_cl[1]+has_cl[2]; testErrorRet(trois==0,smica_uncomp,"mismatch",*err,__LINE__,NULL); six = trois + has_cl[3]+has_cl[4]+has_cl[5]; mtot = mt*has_cl[0]+(has_cl[1]+has_cl[2])*mp; testErrorRet(mtot==0,smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(mt!=0 && has_cl[0]==0,smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(mt==0 && has_cl[0]!=0,smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(mp!=0 && has_cl[1]==0 && has_cl[2]==0,smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(mp==0 && (has_cl[1]!=0 || has_cl[2]!=0),smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(has_cl[0]==0 && (has_cl[3]!=0 || has_cl[4]!=0),smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(has_cl[1]==0 && (has_cl[3]!=0 || has_cl[5]!=0),smica_uncomp,"mismatch",*err,__LINE__,NULL); testErrorRet(has_cl[2]==0 && (has_cl[4]!=0 || has_cl[5]!=0),smica_uncomp,"mismatch",*err,__LINE__,NULL); if (trois==1) { // cas particulier 1D data->locpars=NULL; data->SCnD = comp_1D_init(nbins,mtot,Acprs,err); forwardError(*err,__LINE__,NULL); SC = alloc_SC(nbins,nbins,mtot,data,&comp_CMB_update,&free_comp_CMB,err); forwardError(*err,__LINE__,NULL); return SC; } A = malloc_err(sizeof(double)*mtot*trois, err); forwardError(*err,__LINE__,NULL); memset(A,0,sizeof(double)*mtot*trois); // fill T if(has_cl[0]==1) { for(im=0;im<mt;im++) { A[im*trois] = Acprs[im]; } } // fill E and B for(im=0;im<mp;im++) { if (has_cl[1]==1) { A[(im+mt*has_cl[0])*trois+has_cl[0]] = Acprs[im+mt]; } if (has_cl[2]==1) { 
A[(im+mt*has_cl[0]+has_cl[1]*mp)*trois+has_cl[0]+has_cl[1]] = Acprs[im+mt]; } } data->locpars = malloc_err(sizeof(double)*nbins*(trois*(trois+1))/2,err); forwardError(*err,__LINE__,NULL); data->SCnD = comp_nD_init(nbins,mtot,trois,A,err); forwardError(*err,__LINE__,NULL); free(A); for(i=0;i<6;i++) { data->has_cl[i] = has_cl[i]; data->jmp_cl[i] = -1; } if (trois==3) { data->jmp_cl[0] = 0; data->jmp_cl[1] = 3; data->jmp_cl[2] = 5; if (has_cl[3]==1) { data->jmp_cl[3] = 1; } if (has_cl[4]==1) { data->jmp_cl[4] = 2; } if (has_cl[5]==1) { data->jmp_cl[5] = 4; } } if (trois==2) { if (has_cl[0]==1) { data->jmp_cl[0] = 0; if (has_cl[1]==1) { data->jmp_cl[1] = 2; if (has_cl[3]==1) { data->jmp_cl[3] = 1; } } else { data->jmp_cl[2] = 2; if (has_cl[4]==1) { data->jmp_cl[4] = 1; } } } else { data->jmp_cl[1] = 0; data->jmp_cl[2] = 2; if (has_cl[5]==1) { data->jmp_cl[5] = 1; } } } data->trois = trois; SC = alloc_SC(nbins*six,nbins,mtot,data,&comp_CMB_update,&free_comp_CMB,err); forwardError(*err,__LINE__,NULL); return SC; } void comp_CMB_update(void* data,double* locpars, double* rq, error **err) { SmicaComp *SC; SC_CMB_data *SCd; int td,ic,im,i,iq; SC = data; SCd = SC->data; if (SCd->locpars==NULL) { // 1d case comp_1D_update(SCd->SCnD,locpars,rq,err); forwardError(*err,__LINE__,); return; } td = (SCd->trois*(SCd->trois+1))/2; // nd il faut que je reoriente mon vecteur i=0; for(ic=0;ic<6;ic++) { if(SCd->jmp_cl[ic]!=-1) { for(iq=0;iq<SC->nq;iq++) { //_DEBUGHERE_("%d %d %d %g %d",ic,iq,i, locpars[i],iq*td+SCd->jmp_cl[ic]); SCd->locpars[iq*td+SCd->jmp_cl[ic]] = locpars[i]; i+=SCd->has_cl[ic]; } } } comp_nD_update(SCd->SCnD,SCd->locpars,rq,err); forwardError(*err,__LINE__,); return; } void free_comp_CMB(void** data) { SmicaComp *SC; SC_CMB_data *SCd; SC = *data; SCd = SC->data; SCd->SCnD->free((void**)&SCd->SCnD); if(SCd->locpars!=NULL) { free(SCd->locpars); } free(SCd); free(SC); *data = NULL; } SmicaComp* comp_calTP_init(int q, int mT, int mP, int *TEB, int npar, int *im,double *w,int *other, error **err ) { SC_calTP *gc; SmicaComp *SC; int m; int i; gc = malloc_err(sizeof(SC_calTP),err); forwardError(*err,__LINE__,NULL); gc->npar = npar; m = mT*TEB[0] + mP *TEB[1] + mP*TEB[2]; gc->mT = mT; gc->mP = mP; gc->TEB[0] = TEB[0]; gc->TEB[1] = TEB[1]; gc->TEB[2] = TEB[2]; gc->calvec = malloc_err(sizeof(double)*m,err); forwardError(*err,__LINE__,NULL); for (i=0;i<m;i++) { gc->calvec[i]=1; } gc->im = malloc_err(sizeof(int)*npar,err); forwardError(*err,__LINE__,NULL); memcpy(gc->im,im,sizeof(int)*npar); gc->w = malloc_err(sizeof(double)*m*m*2,err); forwardError(*err,__LINE__,NULL); memcpy(gc->w,w,sizeof(double)*m*m*2); gc->other = malloc_err(sizeof(int)*m*m*2,err); forwardError(*err,__LINE__,NULL); memcpy(gc->other,other,sizeof(int)*m*m*2); SC = alloc_SC(npar,q,m,gc, &comp_calTP_update, &comp_calTP_free,err); forwardError(*err,__LINE__,NULL); SC_ismul(SC); return SC; } void comp_calTP_update(void* data,double* locpars, double* rq, error **err) { SmicaComp *SC; SC_calTP *gc; int i,iq,im1,im2,m,m2; SC = data; gc = SC->data; for(i=0;i<SC->ndim;i++) { int im; im = gc->im[i]; gc->calvec[im] = exp(locpars[i]); if (gc->TEB[2]==1 && im>gc->mT) { gc->calvec[im+gc->mP*gc->TEB[1]] = gc->calvec[im]; } } m = SC->m; m2 = m*m; for(iq=0;iq<SC->nq;iq++) { int iqo; iqo = iq*m2; for(im1=0;im1<SC->m;im1++) { int imo; double k1; k1 = gc->calvec[im1]; imo = iqo+im1*m; for(im2=im1;im2<SC->m;im2++) { int mpos,im1_prime,im2_prime; double w,w_prime; mpos = (im1*m+im2)*2; //_DEBUGHERE_("%d %d %d %g %g 
%g",iq,im1,im2,k1,gc->calvec[im2],rq[imo+im2]); w = gc->w[mpos]; w_prime = gc->w[mpos+1]; im1_prime = gc->other[mpos]; im2_prime = gc->other[mpos+1]; //if (iq==0) { // _DEBUGHERE_("%d | %d %d -> %g %g %g %g, %d %d -> %g %g %g %g,",mpos,im1,im2,w,gc->calvec[im1],gc->calvec[im2],w*gc->calvec[im1]*gc->calvec[im2],im1_prime,im2_prime,w_prime,gc->calvec[im1_prime],gc->calvec[im2_prime],w_prime*gc->calvec[im1_prime]*gc->calvec[im2_prime]); // _DEBUGHERE_("%g %g",w*gc->calvec[im1]*gc->calvec[im2]+w_prime*gc->calvec[im1_prime]*gc->calvec[im2_prime],gc->calvec[im1]*gc->calvec[im2]); // } rq[imo+im2] *= w*gc->calvec[im1]*gc->calvec[im2]+w_prime*gc->calvec[im1_prime]*gc->calvec[im2_prime]; //rq[imo+im2] *= gc->calvec[im1]*gc->calvec[im2]; } } } } SmicaComp* comp_icalTP_init(int q, int mT, int mP, int *TEB, int npar, int *im,double *w,int *other, error **err ) { SC_calTP *gc; SmicaComp *SC; int m; int i; gc = malloc_err(sizeof(SC_calTP),err); forwardError(*err,__LINE__,NULL); gc->npar = npar; m = mT*TEB[0] + mP *TEB[1] + mP*TEB[2]; gc->mT = mT; gc->mP = mP; gc->TEB[0] = TEB[0]; gc->TEB[1] = TEB[1]; gc->TEB[2] = TEB[2]; gc->calvec = malloc_err(sizeof(double)*m,err); forwardError(*err,__LINE__,NULL); for (i=0;i<m;i++) { gc->calvec[i]=1; } gc->im = malloc_err(sizeof(int)*npar,err); forwardError(*err,__LINE__,NULL); memcpy(gc->im,im,sizeof(int)*npar); gc->w = malloc_err(sizeof(double)*m*m*2,err); forwardError(*err,__LINE__,NULL); memcpy(gc->w,w,sizeof(double)*m*m*2); gc->other = malloc_err(sizeof(int)*m*m*2,err); forwardError(*err,__LINE__,NULL); memcpy(gc->other,other,sizeof(int)*m*m*2); SC = alloc_SC(npar,q,m,gc, &comp_icalTP_update, &comp_calTP_free,err); forwardError(*err,__LINE__,NULL); SC_ismul(SC); return SC; } void comp_icalTP_update(void* data,double* locpars, double* rq, error **err) { SmicaComp *SC; SC_calTP *gc; int i,iq,im1,im2,m,m2; SC = data; gc = SC->data; for(i=0;i<SC->ndim;i++) { int im; im = gc->im[i]; gc->calvec[im] = 1./sqrt(locpars[i]); if (gc->TEB[2]==1 && im>gc->mT) { gc->calvec[im+gc->mP*gc->TEB[1]] = gc->calvec[im]; } } m = SC->m; m2 = m*m; for(iq=0;iq<SC->nq;iq++) { int iqo; iqo = iq*m2; for(im1=0;im1<SC->m;im1++) { int imo; double k1; k1 = gc->calvec[im1]; imo = iqo+im1*m; for(im2=im1;im2<SC->m;im2++) { int mpos,im1_prime,im2_prime; double w,w_prime; mpos = (im1*m+im2)*2; //_DEBUGHERE_("%d %d %d %g %g %g",iq,im1,im2,k1,gc->calvec[im2],rq[imo+im2]); w = gc->w[mpos]; w_prime = gc->w[mpos+1]; im1_prime = gc->other[mpos]; im2_prime = gc->other[mpos+1]; //if (iq==0) { // _DEBUGHERE_("%d | %d %d -> %g %g %g %g, %d %d -> %g %g %g %g,",mpos,im1,im2,w,gc->calvec[im1],gc->calvec[im2],w*gc->calvec[im1]*gc->calvec[im2],im1_prime,im2_prime,w_prime,gc->calvec[im1_prime],gc->calvec[im2_prime],w_prime*gc->calvec[im1_prime]*gc->calvec[im2_prime]); // _DEBUGHERE_("%g %g",w*gc->calvec[im1]*gc->calvec[im2]+w_prime*gc->calvec[im1_prime]*gc->calvec[im2_prime],gc->calvec[im1]*gc->calvec[im2]); // } rq[imo+im2] *= w*gc->calvec[im1]*gc->calvec[im2]+w_prime*gc->calvec[im1_prime]*gc->calvec[im2_prime]; //rq[imo+im2] *= gc->calvec[im1]*gc->calvec[im2]; } } } } void comp_calTP_free(void** data) { SmicaComp *SC; SC_calTP *gc; SC = *data; gc = SC->data; free(gc->im); free(gc->calvec); free(gc->w); free(gc->other); free(gc); free(SC); *data = NULL; } void comp_beamTP_update(void* data,double* locpars, double* rq, error **err) { SmicaComp *SC; SC_beamTP *gc; int t,iq,im1,im2,m,m2,neigen,offm,offq; double cal; SC = data; gc = SC->data; memcpy(gc->pars+1,locpars,sizeof(double)*SC->ndim); m = SC->m; m2 = m*m; 
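  // The nested loops below apply a multiplicative beam correction: for each bin
  // iq and spectrum (im1,im2), cal accumulates a linear combination of the
  // precomputed eigenmodes, and rq is scaled by exp(2*cal); the squared factor
  // presumably reflects the two maps entering each cross-spectrum. Entries of
  // gc->im equal to 0 pick up the fixed zero stored in gc->pars[0], i.e. "no
  // free parameter for this mode" (the fitted parameters start at pars[1]).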
neigen = gc->neigen; for(iq=0;iq<SC->nq;iq++) { for(im1=0;im1<m;im1++) { for(im2=im1;im2<m;im2++) { cal = 0; offm = im1*m+im2; offq = iq*m2+ offm; for(t=0;t<neigen;t++) { cal += gc->pars[gc->im[offm*neigen + t]] * gc->modes[offq*neigen + t]; //if(iq==0) _DEBUGHERE_("%d %d %d -> %d %g %d %g | %g %g",im1,im2,t,gc->im[offm*neigen + t], gc->pars[gc->im[offm*neigen + t]],offq*neigen+t,gc->modes[offq*neigen + t],cal,exp(cal)); } rq[offq] *= exp(2*cal); rq[iq*m2 + im2*m + im1] = rq[offq]; } } } } void comp_beamTP_free(void** data) { SmicaComp *SC; SC_beamTP *gc; SC = *data; gc = SC->data; free(gc->im); free(gc->pars); free(gc->modes); free(gc); free(SC); *data = NULL; } SmicaComp* comp_beamTP_init(int q, int mT, int mP, int *TEB, int npar, int *im,int neigen, double *modes,error **err ) { SC_beamTP *gc; SmicaComp *SC; int m; int i; gc = malloc_err(sizeof(SC_beamTP),err); forwardError(*err,__LINE__,NULL); m = mT*TEB[0] + mP *TEB[1] + mP*TEB[2]; gc->pars = malloc_err(sizeof(double)*(npar+1),err); forwardError(*err,__LINE__,NULL); gc->pars[0] = 0; gc->neigen = neigen; gc->modes = malloc_err(sizeof(double)*neigen*m*m*q,err); forwardError(*err,__LINE__,NULL); memcpy(gc->modes,modes,sizeof(double)*neigen*m*m*q); gc->im = malloc_err(sizeof(int)*neigen*m*m,err); forwardError(*err,__LINE__,NULL); memcpy(gc->im,im,sizeof(int)*neigen*m*m); SC = alloc_SC(npar,q,m,gc, &comp_beamTP_update, &comp_beamTP_free,err); forwardError(*err,__LINE__,NULL); SC_ismul(SC); return SC; } void comp_totcal_update(void* data,double* locpars, double* rq, error **err) { SmicaComp *SC; int t,iq,im1,im2,m,m2,neigen,offm,offq; double cal; SC = data; cal = 1./(locpars[0]*locpars[0]); m = SC->m; m2 = m*m; for(iq=0;iq<SC->nq;iq++) { for(im1=0;im1<m;im1++) { for(im2=im1;im2<m;im2++) { rq[iq*m2 + im1*m + im2] *= cal; rq[iq*m2 + im2*m + im1] = rq[iq*m2 + im1*m + im2]; } } } } void comp_totcal_free(void** data) { SmicaComp *SC; SC = *data; free(SC); *data = NULL; } SmicaComp* comp_totcal_init(int q, int mT, int mP, int *TEB,error **err ) { SC_beamTP *gc; SmicaComp *SC; int m; int i; m = mT*TEB[0] + mP *TEB[1] + mP*TEB[2]; SC = alloc_SC(1,q,m,NULL, &comp_totcal_update, &comp_totcal_free,err); forwardError(*err,__LINE__,NULL); SC_ismul(SC); return SC; } void comp_totcalP_free(void** data) { SmicaComp *SC; SC = *data; free(SC->data); free(SC); *data = NULL; } void comp_totcalP_update(void* data,double* locpars, double* rq, error **err) { SmicaComp *SC; int t,iq,im1,im2,m,m2,neigen,offm,offq,im0; double cal,scal; int *mz; SC = data; mz = SC->data; cal = 1./(locpars[0]*locpars[0]); scal = 1./(locpars[0]); m = SC->m; m2 = m*m; for(iq=0;iq<SC->nq;iq++) { // TP for(im1=0;im1<mz[0];im1++) { im0 = im1; if (im0<mz[0]) { im0 = mz[0]; } for(im2=im0;im2<m;im2++) { rq[iq*m2 + im1*m + im2] *= scal; rq[iq*m2 + im2*m + im1] = rq[iq*m2 + im1*m + im2]; } } //PP for(im1=mz[0];im1<m;im1++) { for(im2=im1;im2<m;im2++) { rq[iq*m2 + im1*m + im2] *= cal; rq[iq*m2 + im2*m + im1] = rq[iq*m2 + im1*m + im2]; } } } } SmicaComp* comp_totcalP_init(int q, int mT, int mP, int *TEB,error **err ) { SC_beamTP *gc; SmicaComp *SC; int m; int i; int *mz; m = mT*TEB[0] + mP *TEB[1] + mP*TEB[2]; mz = malloc_err(sizeof(int)*3,err); forwardError(*err,__LINE__,NULL); mz[0] = mT*TEB[0]; mz[1] = mT*TEB[1]; mz[2] = mT*TEB[2]; SC = alloc_SC(1,q,m,mz, &comp_totcalP_update, &comp_totcalP_free,err); forwardError(*err,__LINE__,NULL); SC_ismul(SC); return SC; } /* OLD */ double smica_crit_classic(void *vsmic,error **err) { double res; int iq,m,nq; Smica *smic; smic = vsmic; m = smic->m; nq = 
smic->nq;

  if (smic->crit_classic_init==0) {
    for(iq = 0; iq < nq; iq++) {
      // precompute the cholesky-decomposed rq_hat
      int mx,my,info;
      double *rql;
      char uplo;

      rql = smic->rq_hat + iq*m*m;
      // chol
      uplo = 'L';
      dpotrf(&uplo,&m,rql,&m,&info);
      testErrorRetVA(info!=0,lowly_chol,"Could not cholesky decompose rq_hat using dpotrf (%d)",*err,__LINE__,0,info);
      // fill the U part with 0 (beware, f90!)
      for(mx=0;mx<m;mx++) {
        for(my=mx+1;my<m;my++) {
          rql[my*m+mx] = 0;
        }
      }
    }
    smic->crit_classic_init = 1;
  }

  res = 0;
  for(iq=0;iq<nq;iq++) {
    double kdd;
    memcpy(smic->z_buf,smic->rq_hat+m*m*iq,m*m*sizeof(double));
    kdd = kld(m,smic->z_buf,smic->rq+m*m*iq,err);
    forwardError(*err,__LINE__,0);
    res += smic->wq[iq] * kdd;
  }

  return -res;
}

double kld(int n, double* rq_hat, double* rq, error **err) {
  char uplo,trans,diag,side;
  int nn, info,i;
  double *z;
  double done;
  double res;

  // assumes rq_hat already contains the cholesky factor of rq_hat;
  // rq_hat and rq are destroyed on output

  // cholesky-decompose rq
  uplo = 'L';
  nn = n;
  dpotrf(&uplo,&nn,rq,&nn,&info);
  testErrorRetVA(info!=0,lowly_chol,"Could not cholesky decompose rq using dpotrf (%d)",*err,__LINE__,0,info);

  // solve rq z = rq_hat
  side = 'L';
  trans = 'N';
  diag = 'N';
  done = 1;
  dtrsm(&side, &uplo, &trans, &diag, &nn, &nn, &done, rq, &nn, rq_hat, &nn);
  z = rq_hat;

  // compute sum(z^2)
  res = 0;
  for(i=0;i<n*n;i++) {
    res += z[i]*z[i];
  }
  for(i=0;i<n;i++) {
    res -= 2 * log(z[i*n+i]);
  }
  res -= n;

  return 0.5*res;
}
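/*
 * Illustrative sanity check (added for clarity; not part of the original
 * smica.c, and the helper name is the editor's): a LAPACK-free version of
 * kld() for the 1x1 case. With m = 1 the Cholesky factor of rq is sqrt(rq),
 * the triangular solve gives z = sqrt(rq_hat/rq), so sum(z^2) = rq_hat/rq
 * and -2*log(z) = -log(rq_hat/rq); the criterion reduces to
 * 0.5*(r - log(r) - 1) with r = rq_hat/rq, which is >= 0 and vanishes iff
 * rq_hat == rq.
 */
static double kld_1x1_check(double rq_hat, double rq) {
  double r = rq_hat / rq;   /* tr(R^-1 Rhat) for a 1x1 "matrix" */
  return 0.5 * (r - log(r) - 1.0);
}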
1.c
// Contributed by Jeremy Zerfas // This controls the initial size used for the hash tables. This needs to be a power of two because a mask is also // calculated from this by using INITIAL_HASH_TABLE_SIZE-1. #define INITIAL_HASH_TABLE_SIZE 16 // This controls the maximum length for each set of oligonucleotide frequencies and each oligonucleotide count output by // this program. #define MAXIMUM_OUTPUT_LENGTH 4096 #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; //****************************************** //*** Start of hash table implementation *** //****************************************** // In order to prevent too many collisions from occurring the hash table is grown when it is filled to a certain // percentage. This value sets the percentage that controls when growing should occur. This value must be set as a // fraction between 0 and 1 but sane values are generally around 3/4. Also do NOT place this value in parentheses since // it would just perform integer division and evaluate as zero, the value needs to be multiplied by another value first // before the division is performed in order to generate useful values. Setting the value too low causes the hash table // to be made larger than it needs to be which reduces the effectiveness of caches and setting it too high will cause a // large amount of collisions. #define HASH_TABLE_LOAD_LIMIT 12/16 typedef struct element{ #define EMPTY_VALUE_KEY -1 int64_t key; // If key is negative, then this element is empty, otherwise key and value contain the unmodified key // and value. int32_t value; } element; typedef struct hash_table{ intnative_t size; // The current capacity of the hash table. Never will actually be reached since the hash // table will be grown first when it reaches element_Limit. int64_t key_Mask; // ANDed with keys to make sure that hash table indices do not exceed the size of the // hash table. intnative_t element_Limit; // Controls the maximum amount of elements that are allowed in the hash table before it // will be grown. intnative_t element_Count; // The current amount of elements in the hash table. element * elements; } hash_table; // Create a hash table with space allocated for requested_Size elements. requested_Size must be a power of two since the // mask for keys is defined as requested_Size-1. hash_table * create_Hash_Table(intnative_t requested_Size){ hash_table * created_Hash_Table=malloc(sizeof(hash_table)); // Initialize the properties for the created_Hash_Table. created_Hash_Table->size=requested_Size; created_Hash_Table->key_Mask=requested_Size-1; created_Hash_Table->element_Limit=requested_Size*HASH_TABLE_LOAD_LIMIT; created_Hash_Table->element_Count=0; created_Hash_Table->elements=malloc(requested_Size*sizeof(element)); // Initialize all elements in the created_Hash_Table to have initial keys set to EMPTY_VALUE_KEY and values set to // 0. for(intnative_t i=0; i<requested_Size; i++) created_Hash_Table->elements[i]=(element){EMPTY_VALUE_KEY, 0}; return created_Hash_Table; } // Destroy hash table pointed to by hash_Table_To_Destroy and all of its elements. void destroy_Hash_Table(hash_table * hash_Table_To_Destroy){ free(hash_Table_To_Destroy->elements); free(hash_Table_To_Destroy); } // Hash function used to hash keys. #define hash_Key(key) (key ^ key>>7) // Grow hash_Table_To_Grow by quadrupling it in size. 
A new elements array is created, the existing elements are // inserted into the new elements array, the old elements array is deleted, and the properties for hash_Table_To_Grow // are updated. void grow_Hash_Table(hash_table * hash_Table_To_Grow){ intnative_t old_Hash_Table_Size=hash_Table_To_Grow->size; intnative_t new_Hash_Table_Size=old_Hash_Table_Size*4; // Keep a reference to old_Hash_Table_Elements and allocate space for new_Hash_Table_Elements. element * old_Hash_Table_Elements=hash_Table_To_Grow->elements; element * new_Hash_Table_Elements=malloc(new_Hash_Table_Size*sizeof(element)); // Update the properties for the hash_Table_To_Grow. hash_Table_To_Grow->size=new_Hash_Table_Size; hash_Table_To_Grow->key_Mask=new_Hash_Table_Size-1; hash_Table_To_Grow->element_Limit=new_Hash_Table_Size*HASH_TABLE_LOAD_LIMIT; hash_Table_To_Grow->elements=new_Hash_Table_Elements; // Initialize all elements in new_Hash_Table_Elements to have initial keys set to EMPTY_VALUE_KEY and values set to // 0. for(intnative_t i=0; i<new_Hash_Table_Size; i++) new_Hash_Table_Elements[i]=(element){EMPTY_VALUE_KEY, 0}; // Copy all old_Hash_Table_Elements to new_Hash_Table_Elements. This code is simpler and faster than using the // find_Or_Add_Element_For_Key() function since we don't need to worry about updating element_Count and checking to // see if we have reached element_Limit. for(intnative_t i=0; i<old_Hash_Table_Size; i++){ if(old_Hash_Table_Elements[i].key>=0){ int64_t elements_Index=hash_Key(old_Hash_Table_Elements[i].key) & hash_Table_To_Grow->key_Mask; // Find the first free spot in new_Hash_Table_Elements and copy the old element to it. while(new_Hash_Table_Elements[elements_Index].key>=0){ elements_Index++; elements_Index&=hash_Table_To_Grow->key_Mask; } new_Hash_Table_Elements[elements_Index]=old_Hash_Table_Elements[i]; } } free(old_Hash_Table_Elements); } // See if key is already in hash_Table and if so then return the element for it, otherwise add the key to hash_table // (and grow it if necessary) and return the element for it. element * find_Or_Add_Element_For_Key(hash_table * hash_Table, int64_t key){ int64_t elements_Index=hash_Key(key) & hash_Table->key_Mask; // Search hash_Table for key. element * elements=hash_Table->elements; while(elements[elements_Index].key!=key){ // If we reach a key with a negative value then that means that key is not in hash_Table so we will go ahead and // add it. if(elements[elements_Index].key<0){ // If we're at the hash table's load limit then grow the hash table and call this function a second time to // add and return an item. if(hash_Table->element_Count>=hash_Table->element_Limit){ grow_Hash_Table(hash_Table); return find_Or_Add_Element_For_Key(hash_Table, key); } // Set the key for this element to key, increment element_Count, and break out of the loop so that this // element will be returned. elements[elements_Index].key=key; hash_Table->element_Count++; break; } // Still haven't found key or a free spot so continue to the next index. elements_Index++; elements_Index&=hash_Table->key_Mask; } return &elements[elements_Index]; } //****************************************** //*** End of hash table implementation *** //****************************************** // Macro to convert a nucleotide character to a code. Note that upper and lower case ASCII letters only differ in the // fifth bit from the right and we only need the three least significant bits to differentiate the letters 'A', 'C', // 'G', and 'T'. 
// Spaces in this array/string will never be used as long as characters other than 'A', 'C', 'G', and 'T'
// aren't used.
#define code_For_Nucleotide(nucleotide) (" \0 \1\3 \2"[nucleotide & 0b111])

// And one more macro to convert the codes back to nucleotide characters.
#define nucleotide_For_Code(code) ("ACGT"[code & 0b11])

// Function to use when sorting elements with qsort() later. Elements with larger values will come first and in cases of
// identical values then elements with smaller keys will come first.
int element_Compare(const void * uncasted_Left_Element, const void * uncasted_Right_Element){
   const element * left_Element=uncasted_Left_Element, * right_Element=uncasted_Right_Element;

   // Sort based on element values.
   if(left_Element->value < right_Element->value) return 1;
   if(left_Element->value > right_Element->value) return -1;

   // If we got here then both items have the same value so then sort based on key.
   if(left_Element->key > right_Element->key) return 1;
   else return -1;
}

// Generate frequencies for all oligonucleotides in polynucleotide that are of desired_Length_For_Oligonucleotides and
// then save it to output.
void generate_Frequencies_For_Desired_Length_Oligonucleotides(char * polynucleotide, intnative_t polynucleotide_Length
  , intnative_t desired_Length_For_Oligonucleotides, char * output){

   hash_table * hash_Table=create_Hash_Table(INITIAL_HASH_TABLE_SIZE);

   // Add all the complete oligonucleotides of desired_Length_For_Oligonucleotides to hash_Table.
   int64_t code=0;
   for(intnative_t i=0; i<polynucleotide_Length; i++){
      int64_t mask=((int64_t)1<<2*desired_Length_For_Oligonucleotides)-1;
      code=(code<<2 & mask) | polynucleotide[i];
      if(i>=desired_Length_For_Oligonucleotides-1)
         find_Or_Add_Element_For_Key(hash_Table, code)->value++;
   }

   // Create an array of elements from hash_Table.
   intnative_t elements_Array_Size=hash_Table->element_Count;
   element * elements_Array=malloc(elements_Array_Size*sizeof(element));
   for(intnative_t i=0, j=0; i<hash_Table->size; i++){
      if(hash_Table->elements[i].key>=0){
         elements_Array[j].key=hash_Table->elements[i].key;
         elements_Array[j].value=hash_Table->elements[i].value;
         j++;
      }
   }

   destroy_Hash_Table(hash_Table);

   // Sort elements_Array.
   qsort(elements_Array, elements_Array_Size, sizeof(element), &element_Compare);

   // Calculate the total_Count of all elements.
   intnative_t total_Count=0;
   for(intnative_t i=0; i<elements_Array_Size; i++)
      total_Count+=elements_Array[i].value;

   // Print the frequencies for each element.
   for(intnative_t output_Position=0, i=0; i<elements_Array_Size; i++){

      // Decode key back into an oligonucleotide.
      char oligonucleotide[desired_Length_For_Oligonucleotides+1];
      for(intnative_t j=desired_Length_For_Oligonucleotides-1; j>-1; j--){
         oligonucleotide[j]=nucleotide_For_Code(elements_Array[i].key);
         elements_Array[i].key>>=2;
      }
      oligonucleotide[desired_Length_For_Oligonucleotides]='\0';

      // Output the frequency for oligonucleotide to output.
      output_Position+=snprintf(output+output_Position, MAXIMUM_OUTPUT_LENGTH-output_Position, "%s %.3f\n"
        , oligonucleotide, 100.0f*elements_Array[i].value/total_Count);
   }

   free(elements_Array);
}

// Generate a count for the number of times oligonucleotide appears in polynucleotide and then save it to output.
void generate_Count_For_Oligonucleotide(char * polynucleotide, intnative_t polynucleotide_Length, char * oligonucleotide
  , char * output){
   intnative_t oligonucleotide_Length=strlen(oligonucleotide);

   hash_table * hash_Table=create_Hash_Table(INITIAL_HASH_TABLE_SIZE);

   // Add all the complete oligonucleotides of oligonucleotide_Length to hash_Table. Note that a complete
   // oligonucleotide first becomes available at i==oligonucleotide_Length-1, matching the frequency function above.
   int64_t key=0;
   for(intnative_t i=0; i<polynucleotide_Length; i++){
      int64_t mask=((int64_t)1<<2*oligonucleotide_Length)-1;
      key=(key<<2 & mask) | polynucleotide[i];
      if(i>=oligonucleotide_Length-1)
         find_Or_Add_Element_For_Key(hash_Table, key)->value++;
   }

   // Generate key for the oligonucleotide.
   key=0;
   for(intnative_t i=0; i<oligonucleotide_Length; i++)
      key=(key<<2) | code_For_Nucleotide(oligonucleotide[i]);

   // Output the count for oligonucleotide to output.
   intnative_t count=find_Or_Add_Element_For_Key(hash_Table, key)->value;
   snprintf(output, MAXIMUM_OUTPUT_LENGTH, "%jd\t%s", (intmax_t)count, oligonucleotide);

   destroy_Hash_Table(hash_Table);
}

int main(int argc, char * argv[]){
   char buffer[4096];

   // Open the file that was specified as a command line argument.
   FILE * input_File=fopen(argv[1], "r");

   // Find the start of the third polynucleotide.
   while(fgets(buffer, sizeof(buffer), input_File) && memcmp(">THREE", buffer, sizeof(">THREE")-1));

   // Start with 64 KiB of storage for reading in the polynucleotide and grow geometrically.
   intnative_t polynucleotide_Capacity=65536;
   intnative_t polynucleotide_Length=0;
   char * polynucleotide=malloc(polynucleotide_Capacity);

   // Start reading and encoding the third polynucleotide.
   while(fgets(buffer, sizeof(buffer), input_File) && buffer[0]!='>'){
      for(intnative_t i=0; buffer[i]!='\0'; i++)
         if(buffer[i]!='\n')
            polynucleotide[polynucleotide_Length++]=code_For_Nucleotide(buffer[i]);

      // Make sure we still have enough memory allocated for any potential nucleotides in the next line.
      if(polynucleotide_Capacity-polynucleotide_Length < sizeof(buffer))
         polynucleotide=realloc(polynucleotide, polynucleotide_Capacity*=2);
   }

   // Free up any leftover memory.
   polynucleotide=realloc(polynucleotide, polynucleotide_Length);

   char output_Buffer[7][MAXIMUM_OUTPUT_LENGTH];

   // Do the following functions in parallel.
   #pragma omp parallel sections
   {
      #pragma omp section
      generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide, polynucleotide_Length, 1
        , output_Buffer[0]);
      #pragma omp section
      generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide, polynucleotide_Length, 2
        , output_Buffer[1]);

      #pragma omp section
      generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGT", output_Buffer[2]);
      #pragma omp section
      generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTA", output_Buffer[3]);
      #pragma omp section
      generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTATT", output_Buffer[4]);
      #pragma omp section
      generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTATTTTAATT", output_Buffer[5]);
      #pragma omp section
      generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTATTTTAATTTATAGT"
        , output_Buffer[6]);
   }

   for(intnative_t i=0; i<7; printf("%s\n", output_Buffer[i++]));

   free(polynucleotide);
}
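// Note added for clarity (not part of the original program): why the two lookup macros above work. Only the low three
// bits of the ASCII code distinguish 'A', 'C', 'G' and 'T', for upper and lower case alike:
//    'A' & 0b111 == 1  ->  " \0 \1\3 \2"[1] == 0
//    'C' & 0b111 == 3  ->  " \0 \1\3 \2"[3] == 1
//    'G' & 0b111 == 7  ->  " \0 \1\3 \2"[7] == 2
//    'T' & 0b111 == 4  ->  " \0 \1\3 \2"[4] == 3
// and nucleotide_For_Code() inverts this with "ACGT"[code & 0b11]. Relatedly, HASH_TABLE_LOAD_LIMIT expands
// unparenthesized on purpose: requested_Size*12/16 multiplies first, whereas requested_Size*(12/16) would be 0.
// The helper below (its name is the editor's, not from the original source) verifies the round trip for all eight
// input letters.
static int nucleotide_Codes_Round_Trip(void){
   const char * nucleotides="ACGTacgt";
   for(intnative_t i=0; i<8; i++)
      if(nucleotide_For_Code(code_For_Nucleotide(nucleotides[i])) != (nucleotides[i] & ~0x20))
         return 0;
   return 1;
}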
GB_binop__eq_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int32) // A.*B function (eWiseMult): GB (_AemultB_03__eq_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int32) // A*D function (colscale): GB (_AxD__eq_int32) // D*A function (rowscale): GB (_DxB__eq_int32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int32) // C=scalar+B GB (_bind1st__eq_int32) // C=scalar+B' GB (_bind1st_tran__eq_int32) // C=A+scalar GB (_bind2nd__eq_int32) // C=A'+scalar GB (_bind2nd_tran__eq_int32) // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT32 || GxB_NO_EQ_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif 
} //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
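//------------------------------------------------------------------------------
// illustrative sketch (editor's addition, not generated code)
//------------------------------------------------------------------------------

// A minimal standalone version of the bind1st kernel defined above, with the
// GraphBLAS plumbing (GB_void casts, the Bb bitmap test, the GB_DISABLE guard)
// stripped out: the scalar x is bound as the first operand of z = (x == bij)
// and applied to a dense array in parallel. The function name and the dense
// no-bitmap assumption are illustrative only.
static void eq_int32_bind1st_demo
(
    bool *Cx,               // output array of size anz
    int32_t x,              // scalar bound as the first operand
    const int32_t *Bx,      // input array of size anz
    int64_t anz,
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x == Bx [p]) ;
    }
}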
GB_binop__islt_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__islt_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__islt_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint16) // A*D function (colscale): GB (_AxD__islt_uint16) // D*A function (rowscale): GB (_DxB__islt_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__islt_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__islt_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint16) // C=scalar+B GB (_bind1st__islt_uint16) // C=scalar+B' GB (_bind1st_tran__islt_uint16) // C=A+scalar GB (_bind2nd__islt_uint16) // C=A'+scalar GB (_bind2nd_tran__islt_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__islt_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__islt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
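/* A minimal standalone sketch (illustrative only, not part of GraphBLAS) of
 * what the generated GB (_bind2nd__islt_uint16) kernel above reduces to once
 * the GB_* macros are expanded: apply cij = (aij < y) to every stored entry,
 * skipping entries whose bitmap bit is clear. The function name is
 * hypothetical. */
#include <stdint.h>

static void islt_uint16_bind2nd_sketch(uint16_t *Cx, const uint16_t *Ax,
                                       const int8_t *Ab, /* bitmap, or NULL if all entries are present */
                                       int64_t anz, uint16_t y, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present */
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;                    /* ISLT: 1 if aij < y, else 0 */
    }
}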
tsfourier.c
/* ~~~ Time Series Analysis -- auxiliary ~~~ * * Actual calculation of the fourier transform * * Author: Jakob Rørsted Mosumgaard */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "arrlib.h" #include "fmin.h" #define PI2micro 6.28318530717958647692528676655900576839433879875e-6 #define EPS 1.0e-9 void alpbet(double time[], double flux[], size_t N, double ny, double *alpha, \ double *beta); void alpbetW(double time[], double flux[], double weight[], size_t N,\ double ny, double wsum, double *alpha, double *beta); /* Calculate the fourier transform of time series * * Arguments: * - `time` : Array of times. In seconds! * - `flux` : Array of data. * - `weight` : Array of statistical weights. * - `freq` : Array of cyclic frequencies to sample. * - `N` : Length of the time series * - `M` : Length of the sampling vector * - `power` : OUTPUT -- Array with powers * - `alpha` : OUTPUT -- Array with alphas * - `beta` : OUTPUT -- Array with betas * - `useweight`: Flag to signal whether to use weights or not (0 = no weights) */ void fourier(double time[], double flux[], double weight[], double freq[],\ size_t N, size_t M, double power[], double alpha[], double beta[],\ int useweight) { // Local variables double alp = 0; double bet = 0; double ny = 0; size_t i; // Call functions with or without weights if ( useweight == 0 ) { // Make parallel loop over all test frequencies #pragma omp parallel default(shared) private(alp, bet, ny) { #pragma omp for schedule(static) for (i = 0; i < M; ++i) { // Current frequency ny = freq[i] * PI2micro; // Calculate alpha and beta alpbet(time, flux, N, ny, &alp, &bet); // Store alpha, beta and power alpha[i] = alp; beta[i] = bet; power[i] = alp*alp + bet*bet; } } } else { // Sum of all weights double sumweights = arr_sum(weight, N); // Make parallel loop over all test frequencies #pragma omp parallel default(shared) private(alp, bet, ny) { #pragma omp for schedule(static) for (i = 0; i < M; ++i) { // Current frequency ny = freq[i] * PI2micro; // Calculate alpha and beta alpbetW(time, flux, weight, N, ny, sumweights, &alp, &bet); // Store alpha, beta and power alpha[i] = alp; beta[i] = bet; power[i] = alp*alp + bet*bet; } } } } // Calculate alpha and beta coefficients void alpbet(double time[], double flux[], size_t N, double ny, double *alpha, \ double *beta) { // Auxiliary double sn, cn, D; // Sums double s = 0; double c = 0; double cc = 0; double sc = 0; double ss; // Loop over the time series for (size_t i = 0; i < N; ++i) { // Pre-calculate sin, cos of point sn = sin(ny * time[i]); cn = cos(ny * time[i]); // Calculate sin, cos terms s += flux[i] * sn; c += flux[i] * cn; // Calculate squared and cross terms cc += cn * cn; sc += sn * cn; } // Calculate ss from cc ss = N - cc; // Calculate coefficients D = ss*cc - sc*sc; *alpha = (s * cc - c * sc)/D; *beta = (c * ss - s * sc)/D; } // Calculate alpha and beta coefficients -- USING WEIGHTS void alpbetW(double time[], double flux[], double weight[], size_t N,\ double ny, double wsum, double *alpha, double *beta) { // Auxiliary double sn, cn, D; // Sums double s = 0; double c = 0; double cc = 0; double sc = 0; double ss; // Loop over the time series for (size_t i = 0; i < N; ++i) { // Pre-calculate sin, cos of point sn = sin(ny * time[i]); cn = cos(ny * time[i]); // Calculate sin, cos terms s += weight[i] * flux[i] * sn; c += weight[i] * flux[i] * cn; // Calculate squared and cross terms cc += weight[i] * cn * cn; sc += weight[i] * sn * cn; } // Calculate ss from cc ss = wsum - cc; // 
// Calculate coefficients
    D = ss*cc - sc*sc;
    *alpha = (s * cc - c * sc)/D;
    *beta = (c * ss - s * sc)/D;
}


/* Calculate the fourier transform of time series and find the highest peak
 *  --> Helper routine for CLEAN
 *
 * Arguments:
 *  - `time`     : Array of times. In seconds!
 *  - `flux`     : Array of data.
 *  - `weight`   : Array of statistical weights.
 *  - `freq`     : Array of cyclic frequencies to sample.
 *  - `N`        : Length of the time series
 *  - `M`        : Length of the sampling vector
 *  - `fmax`     : OUTPUT -- Frequency of maximum power
 *  - `alpmax`   : OUTPUT -- Alpha of that frequency
 *  - `betmax`   : OUTPUT -- Beta of that frequency
 *  - `useweight`: Flag to signal whether to use weights or not (0 = no weights)
 */
void fouriermax(double time[], double flux[], double weight[], double freq[],\
                size_t N, size_t M, double *fmax, double *alpmax,\
                double *betmax, int useweight)
{
    // Local variables
    double alpha = 0;
    double beta = 0;
    double ny = 0;
    size_t i;

    // Local variables for finding the peak
    double p = 0;
    double pmaxlocal;
    double nymaxlocal;

    // Maximum power (global)
    double pmax = 0;
    double nymax = 0;

    // For optimisation routine
    double df = PI2micro * (freq[1] - freq[0]);
    double lim1, lim2;

    // Call functions with or without weights
    if ( useweight == 0 ) {
        // Function for minimisation (nested for variable access)
        double powopt(double optny)
        {
            double optalpha, optbeta, optpower;
            alpbet(time, flux, N, optny, &optalpha, &optbeta);
            optpower = optalpha*optalpha + optbeta*optbeta;
            return -optpower;
        }

        // Make parallel loop over all test frequencies
        #pragma omp parallel default(shared) private(alpha, beta, ny, p, pmaxlocal, nymaxlocal)
        {
            // Reset variables
            pmaxlocal = 0;
            nymaxlocal = 0;

            // Do the loop (nowait -> each thread can move on to comparison)
            #pragma omp for schedule(static) nowait
            for (i = 0; i < M; ++i) {
                // Current frequency
                ny = freq[i] * PI2micro;

                // Calculate alpha, beta and power
                alpbet(time, flux, N, ny, &alpha, &beta);
                p = alpha*alpha + beta*beta;

                // Compare to current maximum power
                if ( p > pmaxlocal ) {
                    pmaxlocal = p;
                    nymaxlocal = ny;
                }
            }

            // Make sure we use the maximum from all the threads
            // NOTE: Double check, since the critical region is slow and should
            //       only be entered when necessary (and value can be changed
            //       by several threads, see: goo.gl/lwnzTn)!
            if ( pmaxlocal > pmax ) {
                #pragma omp critical
                {
                    if ( pmaxlocal > pmax ) {
                        pmax = pmaxlocal;
                        nymax = nymaxlocal;
                    }
                }
            }
        }

        // Search around found peak for the "true" minimum
        //  --> Ensure not to go beyond limits (freq has M entries, so the
        //      last valid sample is freq[M-1])
        if ( nymax-df > PI2micro * freq[0] ) lim1 = nymax-df;
        else lim1 = PI2micro * freq[0];
        if ( nymax+df < PI2micro * freq[M-1] ) lim2 = nymax+df;
        else lim2 = PI2micro * freq[M-1];
        pmax = - fmin_golden(powopt, lim1, lim2, EPS, &nymax);

        // Store the optimised values
        alpbet(time, flux, N, nymax, alpmax, betmax);
        *fmax = nymax/PI2micro;
    }
    else {
        // Sum of all weights
        double sumweights = arr_sum(weight, N);

        // Function for minimisation (nested for variable access)
        double powopt(double optny)
        {
            double optalpha, optbeta, optpower;
            alpbetW(time, flux, weight, N, optny, sumweights, &optalpha, &optbeta);
            optpower = optalpha*optalpha + optbeta*optbeta;
            return -optpower;
        }

        // Make parallel loop over all test frequencies
        #pragma omp parallel default(shared) private(alpha, beta, ny, p, pmaxlocal, nymaxlocal)
        {
            // Reset variables
            pmaxlocal = 0;
            nymaxlocal = 0;

            // Do the loop (nowait -> each thread can move on to comparison)
            #pragma omp for schedule(static) nowait
            for (i = 0; i < M; ++i) {
                // Current frequency
                ny = freq[i] * PI2micro;

                // Calculate alpha, beta and power
                alpbetW(time, flux, weight, N, ny, sumweights, &alpha, &beta);
                p = alpha*alpha + beta*beta;

                // Compare to current maximum power
                if ( p > pmaxlocal ) {
                    pmaxlocal = p;
                    nymaxlocal = ny;
                }
            }

            // Make sure we use the maximum from all the threads
            // NOTE: Double check, since the critical region is slow and should
            //       only be entered when necessary (and value can be changed
            //       by several threads, see: goo.gl/lwnzTn)!
            if ( pmaxlocal > pmax ) {
                #pragma omp critical
                {
                    if ( pmaxlocal > pmax ) {
                        pmax = pmaxlocal;
                        nymax = nymaxlocal;
                    }
                }
            }
        }

        // Search around found peak for the "true" minimum
        //  --> Ensure not to go beyond limits (freq has M entries, so the
        //      last valid sample is freq[M-1])
        if ( nymax-df > PI2micro * freq[0] ) lim1 = nymax-df;
        else lim1 = PI2micro * freq[0];
        if ( nymax+df < PI2micro * freq[M-1] ) lim2 = nymax+df;
        else lim2 = PI2micro * freq[M-1];
        pmax = - fmin_golden(powopt, lim1, lim2, EPS, &nymax);

        // Store the optimised values
        alpbetW(time, flux, weight, N, nymax, sumweights, alpmax, betmax);
        *fmax = nymax/PI2micro;
    }

    // Done!
}
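/* Hypothetical usage sketch for fourier() above (not part of the original
 * file, guarded so it does not clash with the program's real entry point):
 * sample a unit-amplitude sinusoid at 3000 microHz and scan a small grid of
 * test frequencies. Assumes linking against arrlib/fmin as usual; times are
 * in seconds and frequencies in microHz, matching PI2micro. The weight array
 * is unused because useweight is 0. */
#ifdef TSFOURIER_DEMO
int main(void)
{
    enum { Ndemo = 1000, Mdemo = 200 };
    static double t[Ndemo], flux[Ndemo], weight[Ndemo];
    static double freq[Mdemo], power[Mdemo], alpha[Mdemo], beta[Mdemo];

    for (size_t i = 0; i < Ndemo; ++i) {
        t[i] = 60.0 * i;                          /* one sample per minute */
        flux[i] = sin(PI2micro * 3000.0 * t[i]);  /* 3000 microHz signal */
    }
    for (size_t j = 0; j < Mdemo; ++j)
        freq[j] = 2900.0 + j;                     /* grid around the peak */

    fourier(t, flux, weight, freq, Ndemo, Mdemo, power, alpha, beta, 0);
    printf("power at 3000 microHz: %g (expected ~1 for unit amplitude)\n",
           power[100]);
    return 0;
}
#endif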
GB_unaryop__ainv_int16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_int32 // op(A') function: GB_tran__ainv_int16_int32 // C type: int16_t // A type: int32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_int32 ( int16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
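/* Standalone sketch (illustrative only, not GraphBLAS itself) of what
 * GB_unop__ainv_int16_int32 above computes once its macros are expanded:
 * typecast each int32_t entry down to int16_t, then apply AINV (negation).
 * The function name is hypothetical. */
#include <stdint.h>

static void ainv_int16_int32_sketch(int16_t *Cx, const int32_t *Ax,
                                    int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int16_t x = (int16_t) Ax [p] ;   /* GB_CASTING: int32 -> int16 */
        Cx [p] = (int16_t) (-x) ;        /* GB_OP: additive inverse */
    }
}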
dropout_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <cstring> #include <random> #include <string> #include <algorithm> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>; template <typename T, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenVector = framework::EigenVector<T, MajorType, IndexType>; template <typename DeviceContext, typename T> class CPUDropoutKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* seed = context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr; auto* y = context.Output<Tensor>("Out"); const auto* x_data = x->data<T>(); auto* y_data = y->mutable_data<T>(context.GetPlace()); float dropout_prob = context.Attr<float>("dropout_prob"); auto& dropout_implementation = context.Attr<std::string>("dropout_implementation"); bool upscale_in_train = (dropout_implementation == "upscale_in_train"); if (!context.Attr<bool>("is_test")) { auto* mask = context.Output<Tensor>("Mask"); auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace()); size_t size = framework::product(mask->dims()); // Special case when dropout_prob is 1.0 if (dropout_prob == 1.0f) { std::memset(y_data, 0, size * sizeof(*y_data)); // NOLINT std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT return; } // std::minstd_rand engine; // NOTE: fixed seed should only be used in unittest or for debug. // Guarantee to use random seed in training. int seed_data = 0; if (seed) { seed_data = *(seed->data<int>()); } else { seed_data = context.Attr<bool>("fix_seed") ? 
context.Attr<int>("seed") : 0; } auto engine = framework::GetCPURandomEngine(seed_data); std::uniform_real_distribution<float> dist(0, 1); for (size_t i = 0; i < size; ++i) { if (dist(*engine) < dropout_prob) { mask_data[i] = 0; y_data[i] = 0; } else { mask_data[i] = 1; if (upscale_in_train) { y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob); } else { y_data[i] = x_data[i]; } } } } else { if (upscale_in_train) { const auto* X_data = x->data<T>(); auto* Y_data = y->mutable_data<T>(context.GetPlace()); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < x->numel(); i++) { Y_data[i] = X_data[i]; } } else { auto X = EigenMatrix<T>::Reshape(*x, 1); auto Y = EigenMatrix<T>::Reshape(*y, 1); auto& place = *context.template device_context<DeviceContext>().eigen_device(); Y.device(place) = X * static_cast<T>(1.0f - dropout_prob); } } } }; template <typename DeviceContext, typename T> class DropoutGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* grad_x = context.Output<Tensor>(framework::GradVarName("X")); auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out")); auto* mask = context.Input<Tensor>("Mask"); grad_x->mutable_data<T>(context.GetPlace()); auto dX = EigenVector<T>::Flatten(*grad_x); auto dY = EigenVector<T>::Flatten(*grad_y); auto& place = *context.template device_context<DeviceContext>().eigen_device(); auto& dropout_implementation = context.Attr<std::string>("dropout_implementation"); if (context.Attr<bool>("is_test") == true) { if (dropout_implementation == "upscale_in_train") { dX.device(place) = static_cast<T>(1) * dY; } else { float dropout_prob = context.Attr<float>("dropout_prob"); dX.device(place) = dY * static_cast<T>(1.0f - dropout_prob); } } else { auto M = EigenVector<uint8_t>::Flatten(*mask); if (dropout_implementation == "upscale_in_train") { float dropout_prob = context.Attr<float>("dropout_prob"); if (dropout_prob == 1.0f) { dX.device(place) = static_cast<T>(0) * dY; } else { dX.device(place) = dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob); } } else { dX.device(place) = dY * M.cast<T>(); } } } }; } // namespace operators } // namespace paddle
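// Illustrative standalone sketch (not part of the Paddle operator above; the
// names are hypothetical) of the "upscale_in_train" arithmetic used by
// CPUDropoutKernel: during training each kept element is divided by (1 - p)
// so the expected value of the output equals the input, which is why the
// inference path can be a plain copy. Assumes dropout_prob < 1 (the kernel
// special-cases p == 1 by zeroing everything).
#include <random>
#include <vector>

inline std::vector<float> DropoutUpscaleSketch(const std::vector<float>& x,
                                               float dropout_prob,
                                               unsigned seed) {
  std::mt19937 engine(seed);
  std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    // drop with probability dropout_prob, otherwise rescale by 1/(1 - p)
    y[i] = (dist(engine) < dropout_prob)
               ? 0.0f
               : x[i] / (1.0f - dropout_prob);
  }
  return y;
}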
spalart_allmaras_turbulence_model.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Jordi Cotela
//                   Riccardo Rossi
//

#if !defined(KRATOS_SPALART_ALLMARAS_TURBULENCE_H_INCLUDED )
#define KRATOS_SPALART_ALLMARAS_TURBULENCE_H_INCLUDED

// System includes
#include <string>
#include <iostream>

// External includes

// Project includes
#include "includes/define.h"
#include "containers/model.h"
#include "processes/process.h"
#include "includes/cfd_variables.h"
#include "solving_strategies/strategies/solving_strategy.h"
//#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/schemes/residualbased_incremental_aitken_static_scheme.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/convergencecriterias/residual_criteria.h"

// Application includes
#include "custom_utilities/periodic_condition_utilities.h"
#include "fluid_dynamics_application_variables.h"

namespace Kratos
{

///@addtogroup FluidDynamicsApplication
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@}
///@name Kratos Classes
///@{

/// An implementation of the Spalart-Allmaras turbulence model for incompressible flows.
/** Detail class definition.
*/
template<class TSparseSpace, class TDenseSpace, class TLinearSolver >
class SpalartAllmarasTurbulenceModel : public Process
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of SpalartAllmarasTurbulenceModel
    KRATOS_CLASS_POINTER_DEFINITION(SpalartAllmarasTurbulenceModel);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor for the Spalart-Allmaras turbulence model.
/** * @param rModelPart ModelPart for the flow problem * @param pLinearSolver Pointer to the linear solver to use in the solution of the viscosity transport problem * @param DomainSize Spatial dimension of the problem (2 or 3) * @param NonLinearTol Relative tolerance for the turbulent viscosity transport problem (convergence is checked using the norm of the residual) * @param MaxIter Maximum number of iterations for the solution of the viscosity transport problem * @param ReformDofSet True if the degrees of freedom change during the problem (for example due to remeshing) false otherwise * @param TimeOrder Order for time integration (1 - Backward Euler will be used, 2 - BDF2 method) */ SpalartAllmarasTurbulenceModel( ModelPart& rModelPart, typename TLinearSolver::Pointer pLinearSolver, unsigned int DomainSize, double NonLinearTol, unsigned int MaxIter, bool ReformDofSet, unsigned int TimeOrder) : mr_model_part(rModelPart), mrSpalartModelPart(rModelPart.GetModel().CreateModelPart("SpalartModelPart")), mdomain_size(DomainSize), mtol(NonLinearTol), mmax_it(MaxIter), mtime_order(TimeOrder), madapt_for_fractional_step(false) { //************************************************************************************************ //check that the variables needed are in the model part if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(DISTANCE))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", DISTANCE); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(VELOCITY))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", VELOCITY); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(MOLECULAR_VISCOSITY))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", MOLECULAR_VISCOSITY); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(TURBULENT_VISCOSITY))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", TURBULENT_VISCOSITY); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(MESH_VELOCITY))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", MESH_VELOCITY); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(VISCOSITY))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", VISCOSITY); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_AREA))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", NODAL_AREA); if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(TEMP_CONV_PROJ))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", TEMP_CONV_PROJ); if (mr_model_part.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::logic_error, "insufficient buffer size for BDF2, currently buffer size is ", mr_model_part.GetBufferSize()); //************************************************************************************************ //construct a new auxiliary model part mrSpalartModelPart.GetNodalSolutionStepVariablesList() = mr_model_part.GetNodalSolutionStepVariablesList(); mrSpalartModelPart.SetBufferSize(3); mrSpalartModelPart.Nodes() = mr_model_part.Nodes(); mrSpalartModelPart.SetProcessInfo(mr_model_part.pGetProcessInfo()); mrSpalartModelPart.SetProperties(mr_model_part.pProperties()); std::string ElementName; if (DomainSize == 2) ElementName = std::string("SpalartAllmaras2D"); else ElementName = std::string("SpalartAllmaras3D"); const Element& rReferenceElement = KratosComponents<Element>::Get(ElementName); //generating the elements for (ModelPart::ElementsContainerType::iterator iii = mr_model_part.ElementsBegin(); iii != 
mr_model_part.ElementsEnd(); iii++)
        {
            Properties::Pointer properties = iii->pGetProperties();
            Element::Pointer p_element = rReferenceElement.Create(iii->Id(), iii->GetGeometry(), properties);
            mrSpalartModelPart.Elements().push_back(p_element);
        }

        // pointer types for the solution strategy construction
        typedef typename Scheme< TSparseSpace, TDenseSpace >::Pointer SchemePointerType;
        typedef typename ConvergenceCriteria< TSparseSpace, TDenseSpace >::Pointer ConvergenceCriteriaPointerType;
        typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
        typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;

        // Solution scheme: Aitken iterations
        const double DefaultAitkenOmega = 1.0;
        SchemePointerType pScheme = SchemePointerType( new ResidualBasedIncrementalAitkenStaticScheme< TSparseSpace, TDenseSpace > (DefaultAitkenOmega) );
        // SchemePointerType pScheme = SchemePointerType( new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > () );

        // Convergence criteria
        const double NearlyZero = 1.0e-20;
        ConvergenceCriteriaPointerType pConvCriteria = ConvergenceCriteriaPointerType( new ResidualCriteria<TSparseSpace,TDenseSpace>(NonLinearTol,NearlyZero) );

        // Builder and solver
        BuilderSolverTypePointer pBuildAndSolver = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> > (pLinearSolver, TURBULENT_VISCOSITY));

        // Strategy
        bool CalculateReactions = false;
        bool MoveMesh = false;
        mpSolutionStrategy = StrategyPointerType( new ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(mrSpalartModelPart,pScheme,pConvCriteria,pBuildAndSolver,MaxIter,CalculateReactions,ReformDofSet,MoveMesh));
        mpSolutionStrategy->SetEchoLevel(0);
        mpSolutionStrategy->Check();
    }

    /// Destructor.
~SpalartAllmarasTurbulenceModel() override { Model& r_model = mrSpalartModelPart.GetModel(); r_model.DeleteModelPart("SpalartModelPart"); } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Solve an iteration of the turbulent viscosity void Execute() override { KRATOS_TRY if(madapt_for_fractional_step == true) { if (!(mrSpalartModelPart.NodesBegin()->SolutionStepsDataHas(FRACT_VEL))) KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", FRACT_VEL); #pragma omp parallel for for (int i = 0; i < static_cast<int>(mrSpalartModelPart.Nodes().size()); i++) { ModelPart::NodesContainerType::iterator it = mrSpalartModelPart.NodesBegin() + i; it->FastGetSolutionStepValue(VELOCITY) = it->FastGetSolutionStepValue(FRACT_VEL); } } AuxSolve(); //update viscosity on the nodes for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin(); i != mrSpalartModelPart.NodesEnd(); ++i) { double molecular_viscosity = i->FastGetSolutionStepValue(MOLECULAR_VISCOSITY); double turbulent_viscosity = i->FastGetSolutionStepValue(TURBULENT_VISCOSITY); if(turbulent_viscosity < 0) { i->FastGetSolutionStepValue(TURBULENT_VISCOSITY) = 1e-9; i->FastGetSolutionStepValue(VISCOSITY) = molecular_viscosity; } else { const double cv1_3 = 7.1*7.1*7.1; double xi = turbulent_viscosity / molecular_viscosity; double xi_3 = xi*xi*xi; double fv1 = xi_3 / (xi_3 + cv1_3); double viscosity = fv1 * turbulent_viscosity + molecular_viscosity; i->FastGetSolutionStepValue(VISCOSITY) = viscosity; } } KRATOS_CATCH(""); } void SetMaxIterations(unsigned int max_it) { KRATOS_TRY mmax_it = max_it; KRATOS_CATCH(""); } void AdaptForFractionalStep() { KRATOS_TRY madapt_for_fractional_step = true; KRATOS_CATCH(""); } void ActivateDES(double CDES) { KRATOS_TRY; mrSpalartModelPart.GetProcessInfo()[C_DES] = CDES; /* //update viscosity on the nodes for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin(); i != mrSpalartModelPart.NodesEnd(); ++i) { double distance = i->FastGetSolutionStepValue(DISTANCE); const array_1d<double,3>& xc = i->Coordinates(); double h_max = 0.0; //compute nodal h (by max edge size) GlobalPointersVector<Node<3> >& neigbours = i->GetValue(NEIGHBOUR_NODES); for(GlobalPointersVector<Node<3> >::iterator ineighb=neigbours.begin(); ineighb!=neigbours.end(); ineighb++) { array_1d<double,3> aux = ineighb->Coordinates(); aux -= xc; double h = norm_2(aux); if(h > h_max) h_max=h; } if(h_max == 0.0) KRATOS_THROW_ERROR(std::logic_error,"unexpected isolated node. Wrong node has Id ",i->Id()); if(distance > h_max*CDES) i->FastGetSolutionStepValue(DISTANCE) = h_max*CDES; }*/ KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "SpalartAllmarasTurbulenceModel"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << "SpalartAllmarasTurbulenceModel"; } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ModelPart& mr_model_part;
    ModelPart& mrSpalartModelPart;
    unsigned int mdomain_size;
    double mtol;
    unsigned int mmax_it;
    unsigned int mtime_order;
    bool madapt_for_fractional_step;
    typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer mpSolutionStrategy;

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    /// Protected constructor, initializing only the references (for derived classes)
    SpalartAllmarasTurbulenceModel(ModelPart& rModelPart)
        : Process(),
          mr_model_part(rModelPart),
          mrSpalartModelPart(rModelPart.GetModel().CreateModelPart("SpalartModelPart"))
    {}

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    //*********************************************************************************
    //**********************************************************************

    /*double*/ void AuxSolve()
    {
        KRATOS_TRY

        //calculate the BDF coefficients
        ProcessInfo& rCurrentProcessInfo = mrSpalartModelPart.GetProcessInfo();
        double Dt = rCurrentProcessInfo[DELTA_TIME];

        if (mtime_order == 2)
        {
            double dt_old = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
            double rho = dt_old / Dt;
            double coeff = 1.0 / (Dt * rho * rho + Dt * rho);

            Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
            BDFcoeffs.resize(3);
            BDFcoeffs[0] = coeff * (rho * rho + 2.0 * rho); //coefficient for step n+1
            BDFcoeffs[1] = -coeff * (rho * rho + 2.0 * rho + 1.0); //coefficient for step n
            BDFcoeffs[2] = coeff;
        }
        else
        {
            Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
            BDFcoeffs.resize(2);
            BDFcoeffs[0] = 1.0 / Dt; //coefficient for step n+1
            BDFcoeffs[1] = -1.0 / Dt; //coefficient for step n
        }

        // unsigned int iter = 0;
        // double ratio;
        // bool is_converged = false;
        // double dT_norm = 0.0;
        // double T_norm = 0.0;

        int current_fract_step = rCurrentProcessInfo[FRACTIONAL_STEP];
        rCurrentProcessInfo[FRACTIONAL_STEP] = 2;
        CalculateProjection();
        rCurrentProcessInfo[FRACTIONAL_STEP] = 1;
        mpSolutionStrategy->Solve();
        rCurrentProcessInfo[FRACTIONAL_STEP] = current_fract_step;

        // while (iter++ < mmax_it && is_converged == false)
        // {
        //     rCurrentProcessInfo[FRACTIONAL_STEP] = 1;
        //     dT_norm = mpSolutionStrategy->Solve();
        //     T_norm = CalculateVarNorm();
        //     CalculateProjection();
        ////     KRATOS_WATCH(dT_norm)
        ////     KRATOS_WATCH(T_norm)
        //     ratio = 1.00;
        //     if (T_norm != 0.00)
        //         ratio = dT_norm / T_norm;
        //     else
        //     {
        //         std::cout << "Nu norm = " << T_norm << " dNu_norm = " << dT_norm << std::endl;
        //     }
        //     if (dT_norm < 1e-11)
        //         ratio = 0; //converged
        //     if (ratio < mtol)
        //         is_converged = true;
        //     std::cout << "   SA iter = " << iter << " ratio = " << ratio << std::endl;
        // }
        // return dT_norm;

        KRATOS_CATCH("")
    }

    //******************************************************************************************************
    //******************************************************************************************************

    ///calculation of the norm of the transported turbulent viscosity
    double CalculateVarNorm()
    {
        KRATOS_TRY;

        double norm = 0.00;

        for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin(); i != mrSpalartModelPart.NodesEnd(); ++i)
        {
            norm +=
pow(i->FastGetSolutionStepValue(TURBULENT_VISCOSITY), 2); } return sqrt(norm); KRATOS_CATCH("") } ///calculation of projection void CalculateProjection() { KRATOS_TRY; ProcessInfo& rCurrentProcessInfo = mrSpalartModelPart.GetProcessInfo(); //first of all set to zero the nodal variables to be updated nodally for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin(); i != mrSpalartModelPart.NodesEnd(); ++i) { (i)->FastGetSolutionStepValue(TEMP_CONV_PROJ) = 0.00; (i)->FastGetSolutionStepValue(NODAL_AREA) = 0.00; } //add the elemental contributions for the calculation of the velocity //and the determination of the nodal area for (ModelPart::ElementIterator i = mrSpalartModelPart.ElementsBegin(); i != mrSpalartModelPart.ElementsEnd(); ++i) { (i)->InitializeSolutionStep(rCurrentProcessInfo); } Communicator& rComm = mrSpalartModelPart.GetCommunicator(); rComm.AssembleCurrentData(NODAL_AREA); rComm.AssembleCurrentData(TEMP_CONV_PROJ); // Obtain nodal projection of the residual for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin(); i != mrSpalartModelPart.NodesEnd(); ++i) { const double NodalArea = i->FastGetSolutionStepValue(NODAL_AREA); if(NodalArea > 0.0) { double& rConvProj = i->FastGetSolutionStepValue(TEMP_CONV_PROJ); rConvProj /= NodalArea; } } KRATOS_CATCH("") } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. SpalartAllmarasTurbulenceModel & operator=(SpalartAllmarasTurbulenceModel const& rOther) { return *this; } /// Copy constructor. SpalartAllmarasTurbulenceModel(SpalartAllmarasTurbulenceModel const& rOther) : mr_model_part(rOther.mr_model_part), mdomain_size(rOther.mdomain_size) { } ///@} }; // Class SpalartAllmarasTurbulenceModel ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TSparseSpace, class TDenseSpace, class TLinearSolver > inline std::istream & operator >>(std::istream& rIStream, SpalartAllmarasTurbulenceModel<TSparseSpace, TDenseSpace, TLinearSolver>& rThis) { return rIStream; } /// output stream function template<class TSparseSpace, class TDenseSpace, class TLinearSolver > inline std::ostream & operator <<(std::ostream& rOStream, const SpalartAllmarasTurbulenceModel<TSparseSpace, TDenseSpace, TLinearSolver>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_SPALART_ALLMARAS_TURBULENCE_H_INCLUDED defined
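// Illustrative standalone sketch (not part of the Kratos header above; the
// function name is hypothetical) of the effective-viscosity update that
// Execute() performs node by node: the Spalart-Allmaras fv1 blending function
// maps the transported variable nu_tilde (TURBULENT_VISCOSITY) into the
// kinematic viscosity used by the flow solver,
// nu_eff = fv1 * nu_tilde + nu_molecular, with fv1 = xi^3 / (xi^3 + cv1^3),
// xi = nu_tilde / nu_molecular and cv1 = 7.1.
inline double EffectiveViscositySketch(double nu_tilde, double nu_molecular)
{
    if (nu_tilde < 0.0) return nu_molecular; // negative values are clipped, as in Execute()
    const double cv1_3 = 7.1 * 7.1 * 7.1;
    const double xi = nu_tilde / nu_molecular;
    const double xi_3 = xi * xi * xi;
    const double fv1 = xi_3 / (xi_3 + cv1_3);
    return fv1 * nu_tilde + nu_molecular;
}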
GB_unaryop__ainv_uint64_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint64_uint16 // op(A') function: GB_tran__ainv_uint64_uint16 // C type: uint64_t // A type: uint16_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint64_uint16 ( uint64_t *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint64_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__log_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__log_fp64_fp64) // op(A') function: GB (_unop_tran__log_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = log (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = log (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = log (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__log_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = log (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = log (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__log_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sparse_matrix_utils.h
/*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_SPARSE_MAT_H_
#define GPB_SPARSE_MAT_H_

#include <memory>
#include <GPBoost/type_defs.h>

extern "C" {
#include <cs.h>
}

namespace GPBoost {

	/*!
	* \brief Solve equation system with a dense lower triangular matrix as left-hand side (Lx=b)
	* \param val Values of lower triangular matrix L in column-major format
	* \param ncol Number of columns
	* \param[out] x Right-hand side vector (solution written on input)
	*/
	void L_solve(const double* val, const int ncol, double* x);

	/*!
	* \brief Solve equation system with the transpose of a dense lower triangular matrix as left-hand side (L'x=b)
	* \param val Values of lower triangular matrix L in column-major format
	* \param ncol Number of columns
	* \param[out] x Right-hand side vector (solution written on input)
	*/
	void L_t_solve(const double* val, const int ncol, double* x);

	/*!
	* \brief Solve equation system with a sparse lower triangular matrix as left-hand side (Lx=b)
	* \param val Values of sparse lower triangular matrix L
	* \param row_idx Row indices corresponding to the values ('InnerIndices' in Eigen)
	* \param col_ptr val indexes where each column starts ('OuterStarts' in Eigen)
	* \param ncol Number of columns
	* \param[out] x Right-hand side vector (solution written on input)
	*/
	void sp_L_solve(const double* val, const int* row_idx, const int* col_ptr, const int ncol, double* x);

	/*!
	* \brief Solve equation system with the transpose of a sparse lower triangular matrix as left-hand side: (L'x=b)
	* \param val Values of sparse lower triangular matrix L
	* \param row_idx Row indices corresponding to the values ('InnerIndices' in Eigen)
	* \param col_ptr val indexes where each column starts ('OuterStarts' in Eigen)
	* \param ncol Number of columns
	* \param[out] x Right-hand side vector (solution written on input)
	*/
	void sp_L_t_solve(const double* val, const int* row_idx, const int* col_ptr, const int ncol, double* x);

	/*!
	* \brief Solve equation system with a sparse left-hand side and a sparse right-hand side (Ax=B) using CSparse function cs_spsolve
	* \param A left-hand side
	* \param B right-hand side
	* \param[out] A_inv_B Solution A^(-1)B
	* \param lower true if A is a lower triangular matrix
	*/
	void sp_Lower_sp_RHS_cs_solve(cs* A, cs* B, sp_mat_t& A_inv_B, bool lower = true);

	/*!
	* \brief Solve equation system with a sparse left-hand side and a sparse right-hand side (Ax=B) using CSparse function cs_spsolve
	* \param A left-hand side. Sparse Eigen matrix in column-major format
	* \param B right-hand side. Sparse Eigen matrix in column-major format
	* \param[out] A_inv_B Solution A^(-1)B
	* \param lower true if A is a lower triangular matrix
	*/
	void eigen_sp_Lower_sp_RHS_cs_solve(sp_mat_t& A, sp_mat_t& B, sp_mat_t& A_inv_B, bool lower = true);

	/*!
	* \brief Solve equation system with a sparse left-hand side and a sparse right-hand side (Ax=B)
	* \param A left-hand side. Sparse Eigen matrix in column-major format
	* \param B right-hand side. Sparse Eigen matrix in column-major format
	* \param[out] A_inv_B Solution A^(-1)B
	* \param lower true if A is a lower triangular matrix
	*/
	void eigen_sp_Lower_sp_RHS_solve(sp_mat_t& A, sp_mat_t& B, sp_mat_t& A_inv_B, bool lower = true);

	/*!
	* \brief Calculate L\H =(L^-1H) if sparse matrices are used.
	* Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
	* \param L lower (or upper) triangular matrix (Cholesky factor)
	* \param H Right-hand side matrix H
	* \param LInvH[out] L\H =(L^-1H)
	* \param lower true if L is a lower triangular matrix
	*/
	void CalcLInvH(sp_mat_t& L, sp_mat_t& H, sp_mat_t& LInvH, bool lower = true);

	/*!
	* \brief Calculate L\H =(L^-1H) if sparse matrices are used. Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
	* \param L lower (or upper) triangular matrix (Cholesky factor)
	* \param H Right-hand side matrix H
	* \param LInvH[out] L\H =(L^-1H)
	* \param lower true if L is a lower triangular matrix
	*/
	void CalcLInvH(sp_mat_t& L, den_mat_t& H, den_mat_t& LInvH, bool lower = true);

	/*!
	* \brief Calculate L\H =(L^-1H) if dense matrices are used. Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
	* \param L lower (or upper) triangular matrix (Cholesky factor)
	* \param H Right-hand side matrix H
	* \param LInvH[out] L\H =(L^-1H)
	* \param lower true if L is a lower triangular matrix
	*/
	void CalcLInvH(den_mat_t& L, den_mat_t& H, den_mat_t& LInvH, bool lower = true);

	/*!
	* \brief Calculate L\H =(L^-1H) if dense matrices are used but H is sparse. Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
	* \param L lower (or upper) triangular matrix (Cholesky factor)
	* \param H Right-hand side matrix H
	* \param LInvH[out] L\H =(L^-1H)
	* \param lower true if L is a lower triangular matrix
	*/
	void CalcLInvH(den_mat_t& L, sp_mat_t& H, den_mat_t& LInvH, bool lower = true);

//	/*!
//	* \brief Calculate L\H =(L^-1H) if sparse matrices are used. Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
//	* \param L lower (or upper) triangular matrix (Cholesky factor)
//	* \param H Right-hand side matrix H
//	* \param LInvH[out] L\H =(L^-1H)
//	* \param lower true if L is a lower triangular matrix
//	*/
//	//template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
//	void CalcLInvH(sp_mat_t& L, sp_mat_t& H, sp_mat_t& LInvH, bool lower = true) {
//		eigen_sp_Lower_sp_RHS_solve(L, H, LInvH, lower);
//		//TODO: use eigen_sp_Lower_sp_RHS_cs_solve -> faster? (currently this crashes due to Eigen bug, see the definition of sp_Lower_sp_RHS_cs_solve for more details)
//	}
//
//	/*!
//	* \brief Calculate L\H =(L^-1H) if dense matrices are used. Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
//	* \param L lower (or upper) triangular matrix (Cholesky factor)
//	* \param H Right-hand side matrix H
//	* \param LInvH[out] L\H =(L^-1H)
//	* \param lower true if L is a lower triangular matrix
//	*/
//	//template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
//	void CalcLInvH(den_mat_t& L, den_mat_t& H, den_mat_t& LInvH, bool lower = true) {
//		LInvH = H;
//		int ncols = (int)L.cols();
//#pragma omp parallel for schedule(static)
//		for (int j = 0; j < (int)H.cols(); ++j) {
//			if (lower) {
//				L_solve(L.data(), ncols, LInvH.data() + j * ncols);
//			}
//			else {
//				L_t_solve(L.data(), ncols, LInvH.data() + j * ncols);
//			}
//		}
//	}
//
//	/*!
//	* \brief Calculate L\H =(L^-1H) if dense matrices are used but H is sparse. Used in 'CalcGradNegMargLikelihoodLAApprox' for non-Gaussian data
//	* \param L lower (or upper) triangular matrix (Cholesky factor)
//	* \param H Right-hand side matrix H
//	* \param LInvH[out] L\H =(L^-1H)
//	* \param lower true if L is a lower triangular matrix
//	*/
//	//template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
//	void CalcLInvH(den_mat_t& L, sp_mat_t& H, den_mat_t& LInvH, bool lower = true) {
//		LInvH = den_mat_t(H);
//		int ncols = (int)L.cols();
//#pragma omp parallel for schedule(static)
//		for (int j = 0; j < (int)H.cols(); ++j) {
//			if (lower) {
//				L_solve(L.data(), ncols, LInvH.data() + j * ncols);
//			}
//			else {
//				L_t_solve(L.data(), ncols, LInvH.data() + j * ncols);
//			}
//		}
//	}

}  // namespace GPBoost

#endif   // GPB_SPARSE_MAT_H_
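// A minimal sketch (an assumption, not the library's actual implementation)
// of the forward substitution that a routine with sp_L_solve's signature
// performs for a lower-triangular matrix stored in compressed sparse column
// (CSC) format, assuming the diagonal entry is the first stored entry of
// each column (as in CSparse's cs_lsolve): divide x[j] by L(j,j), then
// subtract column j's off-diagonal contributions from the remaining
// entries of x. The function name is hypothetical.
static inline void sp_L_solve_sketch(const double* val, const int* row_idx,
	const int* col_ptr, const int ncol, double* x) {
	for (int j = 0; j < ncol; ++j) {
		x[j] /= val[col_ptr[j]];  // diagonal entry L(j,j) leads column j
		for (int k = col_ptr[j] + 1; k < col_ptr[j + 1]; ++k) {
			x[row_idx[k]] -= val[k] * x[j];  // eliminate rows below the diagonal
		}
	}
}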
gmx_amide.c
/* gmx_amide.c is the main source file for the program g_amide, used for estimating * site energies, coupling constants, and transition dipole moment parameters * for Amide I vibrations of protein and peptides systems. * * Written by Mike Reppert (2015) */ #include "gmx_amide_map.h" #include "gmx_amide_bond.h" #include "gmx_amide_charge.h" #include "mem_helper.h" #include "statutil.h" #include "copyrite.h" #include "sysstuff.h" #include "txtdump.h" #include "futil.h" #include "tpxio.h" #include "physics.h" #include "macros.h" #include "gmx_fatal.h" #include "index.h" #include "smalloc.h" #include "vec.h" #include "xvgr.h" #include "gstat.h" #include "string2.h" #include "pbc.h" #include "bondf.h" #if OMP_PARALLEL #include "omp.h" #endif /******************************************************************************** * Site Assignments * ********************************************************************************/ int find_map_sites( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds ) { int b,i,j,at,at0; for(b=0; b<bonds; b++) { p_pb[b].MapAtoms = (int**) malloc(map.nsites*sizeof(int*)); if(p_pb[b].MapAtoms==NULL) { printf("Error allocating memory for MapAtoms of bond %d.\n", b+1); return 0; } p_pb[b].nsites = map.nsites; p_pb[b].natoms = (int*) malloc(map.nsites*sizeof(int)); if(p_pb[b].natoms==NULL) { printf("Error allocating memory for AtomSite numbers of bond %d.\n", b+1); return 0; } for(i=0; i<map.nsites; i++) p_pb[b].natoms[i] = 0; for(i=0; i<map.nsites; i++) { p_pb[b].MapAtoms[i] = (int*) malloc(map.MapSites[i].natoms*sizeof(int)); if(p_pb[b].MapAtoms[i]==NULL) { printf("Error allocating memory for MapAtoms of bond %d, map site %d.\n", b+1, i+1); return 0; } p_pb[b].natoms[i] = map.MapSites[i].natoms; for(j=0; j<map.MapSites[i].natoms; j++) { at0 = -1; if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "H", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding mapping atoms. Mapping atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.MapSites[i].AtomPaths[j].Path[0]); return 0; } // Should there be an error-catch here to be sure that at has been assigned? 
at = trace_path(&pbc, top, x, at0, map.MapSites[i].AtomPaths[j]); p_pb[b].MapAtoms[i][j] = at; } } } return 1; } int find_coup_sites( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds ) { int b,i,j,at,at0; for(b=0; b<bonds; b++) { p_pb[b].CoupAtoms = (int**) malloc(map.ncoupsites*sizeof(int*)); if(p_pb[b].CoupAtoms==NULL) { printf("Error allocating memory for CoupAtoms of bond %d.\n", b+1); return 0; } p_pb[b].ncoupsites = map.ncoupsites; p_pb[b].ncoupatoms = (int*) malloc(map.ncoupsites*sizeof(int)); if(p_pb[b].ncoupatoms==NULL) { printf("Error allocating memory for CoupSite numbers of bond %d.\n", b+1); return 0; } for(i=0; i<map.ncoupsites; i++) p_pb[b].ncoupatoms[i] = 0; for(i=0; i<map.ncoupsites; i++) { p_pb[b].CoupAtoms[i] = (int*) malloc(map.CoupSites[i].natoms*sizeof(int)); if(p_pb[b].CoupAtoms[i]==NULL) { printf("Error allocating memory for CoupAtoms of bond %d, coupling site %d.\n", b+1, i+1); return 0; } p_pb[b].ncoupatoms[i] = map.CoupSites[i].natoms; for(j=0; j<map.CoupSites[i].natoms; j++) { at0 = -1; if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "H", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding coupling site atoms. Coupling atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.CoupSites[i].AtomPaths[j].Path[0]); return 0; } // Should there be an error-catch here to be sure that at has been assigned? at = trace_path(&pbc, top, x, at0, map.CoupSites[i].AtomPaths[j]); p_pb[b].CoupAtoms[i][j] = at; } } } return 1; } // Identify excluded atoms for each amide bond. int find_excluded_atoms( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds) { int b,i,at,at0; for(b=0; b<bonds; b++) { p_pb[b].nexcluded = 0; int nfound = 0; int nalloc = 0; int* ExArray; // Our initial guess is that there are just map.npaths excluded atoms. Could be more or less. ExArray = (int*) malloc(map.npaths*sizeof(int)); nalloc = map.npaths; if(ExArray==NULL) { printf("Error allocating memory for excluded atom indices of bond %d.\n", b+1); return 0; } for(i=0; i<map.npaths; i++) { at0 = -1; // First, identify the starting point for the path, i.e. the index of the atom // corresponding to the first path entry. if(strncmp(map.ExcludedAtomPaths[i].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "H", 6)==0) at0 = p_pb[b].H; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "CD", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding excluded atoms. Excluded atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.ExcludedAtomPaths[i].Path[0]); return 0; } // Next, trace the atom indices from at0 to the end of the path. // If the path is incomplete (which it will be most of the time) // at<0 is returned. at = trace_path(&pbc, top, x, at0, map.ExcludedAtomPaths[i]); if(at>=0) { int oldat; int isnew = 1; // If we find a new atom, first check if it's alredy included in ExArray. // If not, go ahead and add it. 
				for(oldat=0; oldat<nfound; oldat++) {
					if(ExArray[oldat]==at) isnew = 0;
				}
				if(isnew) {
					ExArray[nfound] = at;
					nfound++;
				}
			}
		}
		// Now trim the allocated memory to keep only the indices for found atoms.
		p_pb[b].Excluded = (int*) realloc(ExArray, nfound*sizeof(int));
		if( (p_pb[b].Excluded==NULL) && (nfound!=0) ) {
			printf("Error trimming excess memory from protein bond %d excluded atoms array.\n", b+1);
			p_pb[b].nexcluded = 0;
			return 0;
		}
		p_pb[b].nexcluded = nfound;
	}
	return 1;
}
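/* The Excluded list built above is consumed in get_electrostatics() below: any
 * atom whose index appears in p_pb[b].Excluded is skipped when accumulating
 * potentials, fields, and field gradients for bond b, so that the amide group
 * does not contribute to its own electrostatic map variables. */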
/********************************************************************************
*				Memory Allocation				*
********************************************************************************/

int set_arrays(real ****p_elecData, real ****p_ProElecData, int nelec, real ***p_angleData, real ***p_ProAngleData, int nangles, real ***p_freqData, real ***p_ProFreqData, int nfreq, real ****p_coupData, int ncoup, rvec ***p_coordData, rvec ***p_ProCoordData, rvec **p_dipData, rvec **p_centData, matrix **p_RotMat, matrix **p_ProRotMat, int bonds, int nsites, int nPro, int nProSites) {
	int error = 0;
	int i,ix;
	*p_coordData = (rvec**) malloc(bonds*sizeof(rvec*));
	if(*p_coordData==NULL) error = 1;
	else {
		for(i=0; i<bonds; i++) {
			*(*p_coordData+i) = (rvec*) malloc(nsites*sizeof(rvec));
			if(*(*p_coordData+i)==NULL) {
				error = 1;
				break;
			}
		}
		if(error) {
			// On partial failure, release the rows allocated so far along with the row array itself.
			for(ix=0; ix<i; ix++) free(*(*p_coordData+ix));
			free(*p_coordData);
			printf("Error allocating memory for Coordinate data array.\n");
		}
	}
	if(!error) {
		*p_dipData = (rvec*) malloc(bonds*sizeof(rvec));
		if(*p_dipData==NULL) {
			error = 2;
			printf("Error allocating memory for Dipole moment data.\n");
		}
	}
	if(!error) {
		*p_centData = (rvec*) malloc(bonds*sizeof(rvec));
		if(*p_centData==NULL) {
			error = 3;
			printf("Error allocating memory for bond Center data.\n");
		}
	}
	if(!error) {
		*p_RotMat = (matrix*) malloc(bonds*sizeof(matrix));
		if(*p_RotMat==NULL) {
			error = 4;
			printf("Error allocating memory for Rotation Matrix data.\n");
		}
	}
	if(!error) {
		if(!allocate_3d_array_real(p_elecData, bonds, nsites, nelec)) {
			printf("Error allocating Electrostatic variable array\n");
			error = 5;
		}
		else if(!allocate_2d_array_real(p_angleData, bonds, nangles)) {
			printf("Error allocating Angles variable array\n");
			error = 6;
		}
		else if(!allocate_2d_array_real(p_freqData, bonds, nfreq)) {
			printf("Error allocating Frequency variable array\n");
			error = 7;
		}
		else if(!allocate_3d_array_real(p_coupData, bonds, bonds, ncoup)) {
			printf("Error allocating Coupling variable array\n");
			error = 8;
		}
	}
	if(!error) {
		if(nProSites!=0) {
			if(!allocate_3d_array_real(p_ProElecData, nPro, nProSites, nelec)) {
				printf("Error allocating Proline Electrostatic variable array\n");
				error = 9;
			}
			else if(!allocate_2d_array_real(p_ProAngleData, nPro, nangles)) {
				printf("Error allocating Proline Angles variable array\n");
				error = 10;
			}
			else if(!allocate_2d_array_real(p_ProFreqData, nPro, nfreq)) {
				printf("Error allocating Proline Frequency variable array\n");
				error = 11;
			}
		}
		*p_ProCoordData = (rvec**) malloc(nPro*sizeof(rvec*));
		if(*p_ProCoordData==NULL) error = 12;
		else {
			for(i=0; i<nPro; i++) {
				*(*p_ProCoordData+i) = (rvec*) malloc(nProSites*sizeof(rvec));
				if(*(*p_ProCoordData+i)==NULL) {
					error = 13;
					break;
				}
			}
			if(error) {
				for(ix=0; ix<i; ix++) free(*(*p_ProCoordData+ix));
				free(*p_ProCoordData);
				printf("Error allocating memory for Proline Coordinate data array.\n");
			}
		}
		if(!error) {
			*p_ProRotMat = (matrix*) malloc(nPro*sizeof(matrix));
			if(*p_ProRotMat==NULL) {
				error = 14;
				printf("Error allocating memory for Proline Rotation Matrix data.\n");
			}
		}
	}
	if(!error) return 1;
	else {
		if(error>1) {
			for(i=0; i<bonds; i++) free(*(*p_coordData+i));
			free(*p_coordData);
		}
		if(error>2) free(*p_dipData);
		if(error>3) free(*p_centData);
		if(error>4) free(*p_RotMat);
		if(error>5) free_3d_array_real(*p_elecData, bonds, nsites, nelec);
		if(error>6) free_2d_array_real(*p_angleData, bonds, nangles);
		if(error>7) free_2d_array_real(*p_freqData, bonds, nfreq);
		// Note: the coupling array is bonds x bonds x ncoup (not nfreq).
		if(error>8) free_3d_array_real(*p_coupData, bonds, bonds, ncoup);
		if(error>9) free_3d_array_real(*p_ProElecData, nPro, nProSites, nelec);
		if(error>10) free_2d_array_real(*p_ProAngleData, nPro, nangles);
		if(error>11) free_2d_array_real(*p_ProFreqData, nPro, nfreq);
		// Errors 12 and 13 are cleaned up where they are detected above; only a
		// failure at the Proline Rotation Matrix stage (error 14) leaves a fully
		// allocated ProCoordData array to release here.
		if(error==14) {
			for(i=0; i<nPro; i++) free(*(*p_ProCoordData+i));
			free(*p_ProCoordData);
		}
		return 0;
	}
}

int unset_arrays(real ***elecData, real ***ProElecData, int nelec, real **angleData, real **ProAngleData, int nangles, real **freqData, real **ProFreqData, int nfreq, real ***coupData, int ncoup, rvec **coordData, rvec **ProCoordData, rvec *dipData, rvec *centData, matrix *RotMat, matrix *ProRotMat, int bonds, int nsites, int nPro, int nProSites) {
	int i;
	free_3d_array_real(elecData, bonds, nsites, nelec);
	free_2d_array_real(angleData, bonds, nangles);
	free_2d_array_real(freqData, bonds, nfreq);
	free_3d_array_real(coupData, bonds, bonds, ncoup);
	for(i=0; i<bonds; i++) free(coordData[i]);
	free(coordData);
	free(dipData);
	free(centData);
	free(RotMat);
	// The proline coordinate and rotation arrays are allocated in set_arrays()
	// regardless of nProSites, so they are released unconditionally here.
	for(i=0; i<nPro; i++) free(ProCoordData[i]);
	free(ProCoordData);
	free(ProRotMat);
	if(nProSites!=0) {
		free_2d_array_real(ProFreqData, nPro, nfreq);
		free_3d_array_real(ProElecData, nPro, nProSites, nelec);
		free_2d_array_real(ProAngleData, nPro, nangles);
	}
	return 1;
}

/********************************************************************************
*				   Parameters					*
********************************************************************************/

int get_angles(t_pbc* p_pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, real **angleData, int nangles) {
	const real rad2deg = 57.29578;
	rvec r_ij,r_jk,r_kl,m,n;
	real sign;
	int t1,t2,t3;
	int b;
	t_protbond pb;
	for(b=0; b<bonds; b++) {
		pb = p_pb[b];
		if(pb.dih_set_N) {
			angleData[b][0] = rad2deg*dih_angle(x[pb.phiN[0]], x[pb.phiN[1]], x[pb.phiN[2]], x[pb.phiN[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3);
			angleData[b][1] = rad2deg*dih_angle(x[pb.psiN[0]], x[pb.psiN[1]], x[pb.psiN[2]], x[pb.psiN[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3);
		}
		else {
			angleData[b][0] = -1000.0;
			angleData[b][1] = -1000.0;
		}
		if(pb.dih_set_C) {
			angleData[b][2] = rad2deg*dih_angle(x[pb.phiC[0]], x[pb.phiC[1]], x[pb.phiC[2]], x[pb.phiC[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3);
			angleData[b][3] = rad2deg*dih_angle(x[pb.psiC[0]], x[pb.psiC[1]], x[pb.psiC[2]], x[pb.psiC[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3);
		}
		else {
			angleData[b][2] = -1000.0;
			angleData[b][3] = -1000.0;
		}
	}
	return 1;
}

real get_NN_val(real phi, real psi, real **data, int dim1, int dim2) {
	real val;
	real f_phi, f_psi;
	real start = -180.0;
	real stop = 180.0;
	real step1 = (stop - start) / ( (real) (dim1-1) );
	real step2 = (stop - start) / ( (real) (dim2-1) );
	int phi_ndx = (int) ( (phi-start)/step1 );
	int psi_ndx = (int) ( (psi-start)/step2 );
	f_phi = (phi - (start+phi_ndx*step1) ) / step1;
	f_psi = (psi - (start+psi_ndx*step2) ) / step2;
	if(phi_ndx==-1) phi_ndx = dim1-2;
	if(psi_ndx==-1) psi_ndx = dim2-2;
	if(phi_ndx==dim1-1) phi_ndx = 0;
	if(psi_ndx==dim2-1) psi_ndx = 0;
	val = 0.0;
	if( (phi_ndx>=0) && (psi_ndx>=0) && (phi_ndx<dim1) && (psi_ndx<dim2)) {
		val += (1-f_phi)*((1-f_psi)*data[phi_ndx][psi_ndx] + f_psi*data[phi_ndx][psi_ndx+1] );
		val += f_phi * ((1-f_psi)*data[phi_ndx+1][psi_ndx]+f_psi*data[phi_ndx+1][psi_ndx+1] );
	}
	return val;
}

int get_coordinates(t_pbc
pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, matrix *RotMat, rvec **coordData) { int b,i,j; rvec x_ax, y_ax, z_ax, CN_vec; rvec dx; for(b=0; b<bonds; b++) { // RotMat holds the molecular coordinate unit vectors pbc_dx(&pbc, x[p_pb[b].O], x[p_pb[b].C], x_ax); unitv(x_ax, x_ax); pbc_dx(&pbc, x[p_pb[b].N], x[p_pb[b].C], CN_vec); cprod(x_ax, CN_vec, z_ax); unitv(z_ax, z_ax); cprod(z_ax, x_ax, y_ax); unitv(y_ax, y_ax); for(i=0; i<3; i++) { RotMat[b][i][0] = x_ax[i]; RotMat[b][i][1] = y_ax[i]; RotMat[b][i][2] = z_ax[i]; } clear_rvec(dx); for(i=0; i<map.nsites; i++) { clear_rvec(coordData[b][i]); // MapAtoms[i][0] is our reference point. copy_rvec(x[p_pb[b].MapAtoms[i][0]], coordData[b][i]); for(j=1; j<map.MapSites[i].natoms; j++) { pbc_dx(&pbc, x[p_pb[b].MapAtoms[i][j]], x[p_pb[b].MapAtoms[i][0]], dx); svmul((1.0)/map.MapSites[i].natoms, dx, dx); rvec_inc(coordData[b][i], dx); } // Incorrect PBC treatment at boundaries. Replaced 08/25/2014 by MER. //clear_rvec(coordData[b][i]); //for(j=0; j<map.MapSites[i].natoms; j++) rvec_inc(coordData[b][i], x[p_pb[b].MapAtoms[i][j]]); //svmul((1.0)/map.MapSites[i].natoms, coordData[b][i], coordData[b][i]); // // The lines below compare PBC-corrected and uncorrected values. //rvec x0; //clear_rvec(x0); //for(j=0; j<map.MapSites[i].natoms; j++) rvec_inc(x0, x[p_pb[b].MapAtoms[i][j]]); //svmul((1.0)/map.MapSites[i].natoms, x0, x0); //rvec_sub(x0, coordData[b][i], dx); //printf("PBC - NoPBC: (%6.10f, %6.10f, %6.10f)\n", dx[0], dx[1], dx[2]); } } return 1; } int get_electrostatics(t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, matrix *RotMat, int bonds, rvec **coordData, real ***elecData, int nelec, int nthreads, real cutoff, int nchunks, int* START, int* STOP) { int b; int natoms = top.atoms.nr; real const nm2bohr = 18.8973; real bcutoff = cutoff*nm2bohr; int error = 0; #if OMP_PARALLEL omp_set_num_threads(nthreads); #pragma omp parallel for \ shared(natoms, pbc, top, x, map, p_pb, RotMat, bonds, \ coordData, elecData, nelec, nchunks, START, STOP) #endif for(b=0; b<bonds; b++) { int i,j,at, k; // k for looping chunks real d, invd, invd2, invd3, invd5; real q; real DX, DY, DZ; rvec x_ax, y_ax, z_ax; rvec dx; for(i=0; i<3; i++) { x_ax[i] = RotMat[b][i][0]; y_ax[i] = RotMat[b][i][1]; z_ax[i] = RotMat[b][i][2]; } for(i=0; i<nelec; i++) { if(map.elec_used[i]) { for(j=0; j<map.nsites; j++) elecData[b][j][i] = 0.0; } } for(at=0; at<natoms; at++) { int good = 1; // cjfeng 08/27/2016 if(nchunks) { // Check if we need to include only part of bath for(k=0; k<nchunks; k++) { if(at<START[k]) { // less than the starting point good = 0; } else if(at>STOP[k]) { // more than the ending point good = 0; } else { // Within one of the region good = 1; break; } } } for(i=0; i<p_pb[b].nexcluded; i++) { if(at==p_pb[b].Excluded[i]) { good = 0; break; } } if(good) { for(i=0; i<map.nsites; i++) { pbc_dx(&pbc, coordData[b][i], x[at], dx); //if((b==55)) if(i==0) printf("%6.10f\t%6.10f\t%6.10f\n", dx[0], dx[1], dx[2]); svmul(nm2bohr, dx, dx); d = norm(dx); if( (d!=0.0) && (d<bcutoff) ) { DX = iprod(x_ax, dx); DY = iprod(y_ax, dx); DZ = iprod(z_ax, dx); invd = (1.0)/d; invd2 = invd*invd; invd3 = invd*invd2; invd5 = invd2*invd3; q = top.atoms.atom[at].q; if(map.elec_used[0]) elecData[b][i][0] += q * invd; // Potential if(map.elec_used[1]) elecData[b][i][1] += ( q * DX ) * ( invd3 ); // x-field if(map.elec_used[2]) elecData[b][i][2] += ( q * DY ) * ( invd3 ); // y-field if(map.elec_used[3]) elecData[b][i][3] += ( q * DZ ) * ( invd3 ); // 
z-field
				if(map.elec_used[4]) elecData[b][i][4] += ( q*invd3 ) * ( 1 - 3*DX*DX*invd2 );	// xx
				if(map.elec_used[5]) elecData[b][i][5] -= ( 3*q ) * ( DX*DY*invd5 );		// xy
				if(map.elec_used[6]) elecData[b][i][6] -= ( 3*q ) * ( DX*DZ*invd5 );		// xz
				if(map.elec_used[7]) elecData[b][i][7] += ( q*invd3 ) * ( 1 - 3*DY*DY*invd2 );	// yy
				if(map.elec_used[8]) elecData[b][i][8] -= ( 3*q ) * ( DY*DZ*invd5 );		// yz
				if(map.elec_used[9]) elecData[b][i][9] += ( q*invd3 ) * ( 1 - 3*DZ*DZ*invd2 );	// zz
				//if(b==2) if(i==0) printf("Bond %d, atom %d: field %6.10f\n", b, at, elecData[b][i][1]);
				//if(b==55) if(i==0) printf("Bond %d, atom %d: charge %6.10f\n", b, at, q);
				//if(b==2) if(i==0) printf("Bond %d, atom %d: distance %6.10f\n", b, at, d);
					}
					else if( d==0.0 ) {
						printf("Division by zero when calculating electrostatics between atom %d (bond %d) and site %d.\n", at, b+1, i+1);
						error = 1;
					}
				}
			}
		}
	}
	if(error) return 0;
	return 1;
}

int get_freq(t_amide_map map, t_protbond *p_pb, int bonds, real ***elecData, int nelec, real **angleData, int nangles, real **freqData, int nfreq) {
	int b,i,j;
	real wel, wnn;
	for(b=0; b<bonds; b++) {
		wel = 0.0;
		wnn = 0.0;
		if(map.dimNNFSN[0]*map.dimNNFSN[1]>0) wnn += get_NN_val(angleData[b][0], angleData[b][1], map.NNFSN, map.dimNNFSN[0], map.dimNNFSN[1]);
		if(map.dimNNFSC[0]*map.dimNNFSC[1]>0) wnn += get_NN_val(angleData[b][2], angleData[b][3], map.NNFSC, map.dimNNFSC[0], map.dimNNFSC[1]);
		for(i=0; i<map.nsites; i++) {
			for(j=0; j<nelec; j++) {
				if(map.elec_used[j]) wel += elecData[b][i][j]*map.MapSites[i].shift[j];
			}
		}
		freqData[b][0] = wel + wnn + map.freq;
		if(p_pb[b].isPro) {
			freqData[b][0] += map.proshift;
		}
		freqData[b][1] = wel;
		freqData[b][2] = wnn;
		//printf("Bond %d: %6.10f\n", b, elecData[b][0][1]);
		//for(i=0; i<3; i++) if(b==2) printf("%6.10f\n", elecData[b][0][i+1]);
	}
	return 1;
}

int get_dip(t_pbc pbc, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, real ***elecData, int nelec, matrix *RotMat, rvec *dipData, rvec *centData) {
	int b,i,j;
	rvec CN, CO;
	rvec mu;
	real fCO = 0.0665;	// Remember pdb units are nm, not Angstrom
	real fCN = 0.0258;
	for(b=0; b<bonds; b++) {
		pbc_dx(&pbc, x[p_pb[b].N], x[p_pb[b].C], CN);
		unitv(CN, CN);
		svmul(fCN, CN, CN);
		pbc_dx(&pbc, x[p_pb[b].O], x[p_pb[b].C], CO);
		unitv(CO, CO);
		svmul(fCO, CO, CO);
		copy_rvec(x[p_pb[b].C], centData[b]);	// The C atom is our reference point.
		rvec_inc(centData[b], CO);		// Augment position with CO and CN vectors.
		rvec_inc(centData[b], CN);
		copy_rvec(map.dip, mu);
		for(i=0; i<map.nsites; i++) {
			for(j=0; j<nelec; j++) {
				if(map.elec_used[j]) {
					mu[0] += elecData[b][i][j]*map.MapSites[i].dipx[j];
					mu[1] += elecData[b][i][j]*map.MapSites[i].dipy[j];
					mu[2] += elecData[b][i][j]*map.MapSites[i].dipz[j];
				}
			}
		}
		mvmul(RotMat[b], mu, dipData[b]);
	}
	return 1;
}

int get_pdc(t_pbc pbc, t_protbond *p_pb, int bonds, rvec *dipData, rvec *centData, real ***coupData, int ncoup) {
	int b1, b2;
	const real A = 0.1*(848619.0 / 1650.0) / (0.1011*0.1011);
	// Dipole derivatives can be converted to transition dipole moments using the factor
	//	\sqrt( \hbar / 2*w_i ) = 4.1189e-25 m kg^{1/2} = 0.1011 Angstrom * amu^{1/2}
	// for a 1650 cm-1 vibration. A dipole derivative of 2.73 D/(A*amu^1/2) thus corresponds to
	// a dipole moment matrix element of 2.73*0.1011 = 0.276 D.
	//
	// The code below assumes that the transition dipole moment is in units of Debye.
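	// Numerically, A = 0.1*(848619.0/1650.0)/(0.1011*0.1011) ~= 5.03e3, the
	// familiar transition-dipole-coupling prefactor of ~5034 cm-1 Ang^3/Debye^2;
	// with d measured in Angstrom below, couplings therefore come out in cm-1.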
real d, invd; rvec mu1, mu2; rvec r12; for(b1=0; b1<bonds; b1++) { coupData[b1][b1][0] = 0.0; copy_rvec(dipData[b1], mu1); for(b2=0; b2<b1; b2++) { copy_rvec(dipData[b2], mu2); pbc_dx(&pbc, centData[b2], centData[b1], r12); d = 10.0*norm(r12); invd = 1.0/d; unitv(r12, r12); coupData[b1][b2][0] = A*invd*invd*invd*( iprod(mu1,mu2) - 3.0*( iprod(mu1, r12)*iprod(mu2,r12) ) ); coupData[b2][b1][0] = coupData[b1][b2][0]; } } return 0; } int get_pdc_skinner(t_pbc pbc, t_protbond *p_pb, int bonds, rvec *dipData, rvec *centData, real ***coupData, int ncoup) { int b1, b2; const real A = 0.1*(848619.0 / 1650.0) * (2.73*2.73); real d, invd; rvec mu1, mu2; rvec r12; for(b1=0; b1<bonds; b1++) { coupData[b1][b1][0] = 0.0; copy_rvec(dipData[b1], mu1); unitv(mu1, mu1); for(b2=0; b2<b1; b2++) { copy_rvec(dipData[b2], mu2); unitv(mu2, mu2); pbc_dx(&pbc, centData[b2], centData[b1], r12); d = 10.0*norm(r12); invd = 1.0/d; unitv(r12, r12); coupData[b1][b2][0] = A*invd*invd*invd*( iprod(mu1,mu2) - 3.0*( iprod(mu1, r12)*iprod(mu2,r12) ) ); coupData[b2][b1][0] = coupData[b1][b2][0]; } } return 0; } /* Electrostatic coupling models such as TCC seek to evaluate matrix elements of the form <i| H_C |j> where H_C is the Coulomb interaction energy between the various atoms of the two coupled groups. To develop the TCC model, we expand in normal mode displacements from Q_i = Q_j = 0. By assumption, this starting point corresponds to the energy minimum, so that all first derivatives are zero. Second derivatives dH/dQ_i^2 and dH/DQ_j^2 corresponds to site energy shifts and are assumed to be accounted for by our electrostatic site energy maps. Mixed second derivatives d^2H/(dQ_i dQ_j), however, are expected to be non-zero and will form the basis for our TCC model. From this starting point, we need to evaluate coupling elements of the form <i| Q_i Q_j * (d^2 H_C/ dQ_i dQ_j) |j> = (d^2 H_C/ dQ_i dQ_j) <i| Q_i Q_j |j> = (d^2 H_C/ dQ_i dQ_j) \hbar/(2*sqrt{w_i w_j}) where the derivative term is evaluated at Q_i = Q_j = 0 and w_i and w_j are the frequencies of the i and j normal modes. We next neglect what should be a full integral over the ground state charge density as a function of adiabatic displacement along the normal mode coordinates and assume instead a simple sum of Coulomb interaction terms where charges are assigned to each site within the amide unit as a function of normal mode displacement: H_C = (1 / (4*\pi*\eps_0) ) * \sum_{m,n} ( q_n(Q_i) q_m(Q_j) ) / ( | r_n(Q_i) - r_m(Q_j) | ) where q_n(Q_i) and r_n(Q_i) are atomic charges and displacements of atom n as a function of the normal mode coordinate Q_i. The atomic displacements are by definition linear functions of the normal modes, i.e. r_n(Q_i) = r_n(0) + (dr_n/dQ_i) * Q_i. This linear relationship need not be true for the atomic charges, but for simplicity (and since we truncate at second order already), we make the same assumption for them: q_n(Q_i) = q_n(0) + (dq_n/dQ_i) * Q_i. The zero-displacement value q_n(0) should ideally be the same set of charges as used in electrostatic calculations for site energies, though discrepancies will not likely cause serious issues. 
   Within this model, the mixed second derivatives are easily evaluated, with the result

	4*\pi*\eps_0 * ( d^2 H_C / dQ_i dQ_j ) = \sum_{n,m} {
		 + (dq_n*dq_m) / | r_{nm} |
		 + ( q_n*q_m )*( dr_n \cdot dr_m ) / | r_{nm} |^3
		 - 3*( q_n*q_m )*( dr_n \cdot r_{nm} )*( dr_m \cdot r_{nm} ) / | r_{nm} |^5
		 + [ ( dq_n*q_m )*( dr_m \cdot r_{nm} ) - ( q_n*dq_m )*( dr_n \cdot r_{nm} ) ] / | r_{nm} |^3 }

   where for convenience we have denoted q_n(0) as q_n and (dq_n/dQ_i) as simply dq_n.
   The sums over n and m are to be taken over the atoms contributing to normal modes i
   and j, respectively.

   As input for the TCC calculation, we need a set of atomic charges q_n, charge
   fluxes dq_n, and atomic displacements dr_n (i.e. the displacement of each atom as
   a function of the normal mode coordinate). As a ratio of two quantities with units
   length and length*mass^{1/2}, the units on spatial displacements should be
   amu^{-1/2}. If U_{nx,j} is the unitary matrix required to diagonalize the Hessian
   matrix (in mass-weighted coordinates), these coefficients can be obtained as

	(dr_{nx}/dQ_j) = U_{nx,j}/sqrt{M_n}

   where r_{nx} is the x-coordinate of atom n, Q_j is the j^{th} normal mode and M_n
   is the mass of atom n. The charge flux should likewise be specified in units of
   e_o / ( Angstrom * amu^{1/2} ) since the derivative is taken along the
   (mass-weighted) normal mode coordinate. Note that the product of the dr_n and dq_n
   terms then has units of C / (Angstrom*amu). The suspicious mass unit is eliminated
   by the matrix element <i| Q_i Q_j |j> reported above.

   As an example, for a simple C=O oscillator, the correct mass-weighted normal mode
   coordinate is

	Q_j = ( sqrt(M_O)*r_{Ox} - sqrt(M_C)*r_{Cx} ) / sqrt(2)
	    = ( 2.828 * r_{Ox} - 2.450 * r_{Cx} )

   In this case, U_{Ox,j} = 1/sqrt{2}, while U_{Cx,j} = -1/sqrt{2}. The map input
   file should contain the entries

	COUPSITES: 2
	C O
	NUX: -0.20412 0.17678
	Q: 0.5 -0.5

   The x-component of the dipole moment matrix element specified by these parameters
   is given by

	[mu_i]_x = (d[\mu_x]/dQ_i) = \sqrt( \hbar / (2*w_i) ) * \sum_n ( q_n(0)*dr_{nx} + dq_n r_{nx}(0) )

   For a typical amide I frequency of w_i = 1650 cm-1, the prefactor evaluates to

	\sqrt( \hbar / 2*w_i ) = 4.1189e-25 m kg^{1/2} = 0.1011 Angstrom * amu^{1/2}

   while for the atomic charges and displacements specified above (letting dq_n = 0)
   the sum over n becomes

	Q_C*( 1/sqrt{2*M_C} ) + Q_O*( 1/sqrt{2*M_O} ) = 0.5 * ( -0.20412 ) - 0.5 * ( 0.17678 ) = -0.1905 e_o / amu^{1/2}

   giving together a dipole moment magnitude of

	0.1011 * 0.1905 = 0.0193 e_o*Angstrom
	  = 0.0193*(1.60217657e-19 C/e_o)*(1e-10 m/Angstrom)
	  = (3.0922e-31 C m) / (3.33564e-30 C m/Debye) = 0.0927 Debye

   Note that compared to the experimental value of ~0.35 Debye, this model severely
   underestimates the Amide I oscillator strength (see Ackels et al. Vib. Spec. 50
   (2009) 2-9). The discrepancy is due to the neglect of charge flow within the bond.
   During C=O displacement the bonding characteristics of the amide group change,
   shifting from a C=O double bond resonance structure in which all atoms have formal
   charge of zero toward a C--O single bond structure in which the N obtains a
   positive charge and the O a negative one:

	              (+)
	  H-N        H-N
	     \  <-->    \\
	      C=O        C--O
	                    (-)

   To obtain a simple estimate for the atomic charges required to obtain the
   experimentally observed transition dipole moment strength, take as coordinates for
   a typical amide bond

	r_C = [0.000 0.000 0.000];
	r_O = [1.230 -0.000 0.000];
	r_N = [-0.65 1.170 0.000];
	r_H = [-1.64 1.080 0.100];

   in units of Angstrom.
   Let us suppose q_n(0) charges of 0.5, -0.5, -0.3, +0.3 for C, O, N, and H,
   respectively, and that the normal mode motion involves only the C and O atoms.
   The contribution to the dipole moment from the static charges is then the same as
   calculated above, while the contribution from charge motion remains to be
   assigned. Assuming as a simplest model that charge flows only from N to O, we can
   characterize the charge flux with a single coefficient dq_N = -dq_O = dq > 0. The
   contribution to the x-component of the dipole moment then becomes

	\sqrt( \hbar / (2*w_i) ) * \sum_n ( q_n(0)*dr_{nx} + dq_n r_{nx}(0) )
	  = 0.1011 * dq * ( r_{Nx} - r_{Ox} )
	  = 0.1901 * {dq} e_o*Angstrom = 3.0457e-30 {dq} C*m = 0.9131 {dq} Debye

   (where {dq} represents the numerical value of the charge flux parameter without
   its units) and to the y-component

	0.1011 * dq * ( r_{Ny} - r_{Oy} ) = 0.1183 * {dq} e_o*Angstrom = 1.8954e-30 {dq} C*m = 0.5682 {dq} Debye.

   The total oscillator strength is then

	(0.0927 + 0.9131 dq)^2 + (0.5682 dq)^2 .

   Setting this equal to the experimental value of 0.12 Debye^2, we solve for dq to obtain

	0.0086 + 0.1693 dq + 1.157 dq^2 = 0.12
	1.157 dq^2 + 0.1693 dq - 0.12 = 0
	--> dq = 0.257 e_o / ( Angstrom * amu^{1/2} )

   A minimal (experimentally feasible) TCC model for amide I would thus consist of
   the mapfile entries (with the charge flux placed on N and O, as derived above)

	COUPSITES: 4
	C O N H
	NUX: -0.20412 0.17678 0.0 0.0
	Q: 0.5 -0.5 -0.3 0.3
	DQ: 0.0 -0.257 0.257 0.0

   To get the value of 1/(4*pi*Eps0) used below, start with Eps0 in standard units:

	Eps0 = 8.85418e-12 F/m

   In what follows, curly brackets {} represent numerical values (i.e. a quantity
   minus its units). Let F = Farads, m = meters, C = coulombs, J = Joules,
   cm-1 = wavenumbers, Ang = angstrom, eo = units of elementary charge. We'll need
   the fundamental constants

	Eps0 = 8.8541878176e-12 F/m
	h = 6.62606957e-34 J s
	c = 2.99792458e10 cm / s
	E0 = 1.602176565e-19 C

   and the conversion relations

	{X} m = {X*1e10} Ang
	{X} F = {X} C^2/J
	{X} J = {X/hc} cm-1
	{X} C = {X/E0} eo

   Applying these in order, we get

	{X} F/m = {X/1e10} F/Ang
	        = {( X )/( 1e10 )} ( C^2 )/( Ang J )
	        = {( X h c )/( 1e10 )} ( C^2 )/( Ang cm-1 )
	        = {( X h c )/( E0^2 1e10 )} ( eo^2 )/( Ang cm-1 )

   So we have

	4*pi*Eps0 = {( 4*pi*(8.8541878176e-12)*(6.62606957e-34)*(2.99792458e10) )/( (1e10)*(1.602176565e-19)^2 )} ( eo^2 )/( Ang cm-1 )

   Finally, we note that literature normal modes and charge fluxes are frequently
   reported in "unitless" coordinates, in which the mass-weighted normal mode
   coefficients dr/dQ have been multiplied by a factor with mass units. These values
   can be converted to standard units (for input in the map file) by (1) multiplying
   the reported normal mode coefficient by the square root of the atomic mass, (2)
   normalizing the resultant vector to have unit length (thus producing the
   corresponding column of the unitary transformation matrix which transforms
   mass-weighted coordinates into mass-weighted normal modes), and (3) dividing by
   the square root of the mass to produce dr/dQ. The charge fluxes must likewise be
   re-scaled by the same factor. For the Jansen TCC map parameters reported in
   [J. Chem. Phys.
125, 044312 (2006)], the conversion can be done in Matlab by entering the commands: dq = [0.01668, -0.02845, -0.01530, 0.01736, 0.00008, 0.00963]'; nu = 0.028074*[0, 0, 0; -0.831, 0.105, 0.0; 0.517, -0.047, 0.0; 0.074, -0.036, 0.0; 0.073, -0.133, 0.0; 0.0, 0.0, 0.0]; NU = [nu(:,1); nu(:,2); nu(:,3)]; m = [12, 12, 16, 14, 2, 12]'; M = [m; m; m]; U = NU.*sqrt(M); U = U/norm(U); nu = (U./sqrt(M)); dq = dq*(nu(2)/NU(2)); The resultant values nu and dq should be dq = [0.165604207569485 -0.282460413989918 -0.151903140036758 0.172355458237785 0.000794264784505922 0.0956096234349003] and nu = [0.0000 -0.231622444056777 0.144102050032917 0.0206258253432028 0.0203470979737001 0.0 0.0 0.0292663737977878 -0.0131001863666288 -0.0100341853020987 -0.0370707401438645 0.0 0.0 0.0 0.0 0.0 0.0 0.0] */ int get_tcc(t_pbc pbc, t_amide_map map, rvec *x, t_protbond *p_pb, int bonds, matrix *RotMat, real ***coupData, int ncoup) { int i,j,n,m,k; rvec dx, ri, rj, rij, nu, nui, nuj; real d, invd, invd3, invd5, J; real qn, qm, dqn, dqm; real ONEOVER4PIEPS0 = 1.161409733881537e+05; // units of ( cm-1 * Ang )/( eo^2 ) // For coupling purposes, we take wi = wj = 1650 cm-1. real prefac = 0.0102168287; // \hbar/(2*sqrt{w_i w_j}) in Angstrom^2 * amu for(i=0; i<bonds; i++) { for(j=0; j<i; j++) { J = 0.0; int divbyzero = 0; for(n=0; n<map.ncoupsites; n++) { nu[0] = map.CoupSites[n].nux; nu[1] = map.CoupSites[n].nuy; nu[2] = map.CoupSites[n].nuz; mvmul(RotMat[i], nu, nui); copy_rvec(x[p_pb[i].CoupAtoms[n][0]], ri); for(k=1; k<p_pb[i].ncoupatoms[n]; k++) { pbc_dx(&pbc, x[p_pb[i].CoupAtoms[n][k]], x[p_pb[i].CoupAtoms[n][0]], dx); svmul((1.0)/p_pb[i].ncoupatoms[n], dx, dx); rvec_inc(ri, dx); } // Dipole moment components in e_o * Angstrom for(m=0; m<map.ncoupsites; m++) { nu[0] = map.CoupSites[m].nux; nu[1] = map.CoupSites[m].nuy; nu[2] = map.CoupSites[m].nuz; mvmul(RotMat[j], nu, nuj); copy_rvec(x[p_pb[j].CoupAtoms[m][0]], rj); for(k=0; k<p_pb[j].ncoupatoms[m]; k++) { pbc_dx(&pbc, x[p_pb[j].CoupAtoms[m][k]], x[p_pb[j].CoupAtoms[m][0]], dx); svmul((1.0)/p_pb[j].ncoupatoms[m], dx, dx); rvec_inc(rj, dx); } pbc_dx(&pbc, ri, rj, rij); // points towards i bond svmul(10.0, rij, rij); d = norm(rij); if(d==0) { //printf("Warning: Division by zero when calculating transition charge coupling constant for bonds %d and %d.\n", i, j); //printf("Setting coupling constant to zero...\n"); divbyzero = 1; break; } invd = (1.0)/d; invd3 = invd*invd*invd; invd5 = invd3*invd*invd; qn = map.CoupSites[n].q; qm = map.CoupSites[m].q; dqn = map.CoupSites[n].dq; dqm = map.CoupSites[m].dq; J += prefac*ONEOVER4PIEPS0*(dqn*dqm)*invd; J -= prefac*ONEOVER4PIEPS0*(3*qn*qm)*iprod(nui,rij)*iprod(nuj,rij)*invd5; J -= prefac*ONEOVER4PIEPS0*( -dqn*qm*iprod(nuj,rij) + qn*dqm*iprod(nui,rij) - qn*qm*iprod(nui,nuj) )*invd3; } } if(divbyzero) J = 0.0; coupData[i][j][0] = J; coupData[j][i][0] = J; //if((i==2) && (j==0)) printf("TCC (%d, %d): %6.10f\n", i, j, J); } } return 0; } int get_tc_dip(t_pbc pbc, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, matrix *RotMat, rvec *dipData, rvec *centData) { int i,n,k; rvec dx, ri, nu, nui; rvec dip, ref; // real pi = 3.14159265359; // real c = 2.99792458e+10; // cm/s // real hbar = (6.62606957e-34) / (2.0 * pi); // J*s // real wo = 1650.0*c*2.0*pi; // 1/s real prefac = 0.485498038384979; // sqrt( hbar / (2.0*wo) ) * ( 1.60217657e-19 ) / ( sqrt( 1.660538921e-27 ) * ( 3.33564e-30 ) ); // \sqrt( \hbar / 2*w_i ) * ( 1.60217657e-19 C/eo ) / ( sqrt( 1.660538921e-27 amu/kg) * ( 3.33564e-30 C*m/Debye ) ) for(i=0; i<bonds; i++) { // 
We use CoupAtoms[0][0] as our spatial reference point. // All coordinates MUST be updated relative to this point; // otherwise PBC may give errors. copy_rvec(x[p_pb[i].CoupAtoms[0][0]], ref); for(k=0; k<3; k++) dip[k] = 0.0; for(n=0; n<map.ncoupsites; n++) { nu[0] = map.CoupSites[n].nux; nu[1] = map.CoupSites[n].nuy; nu[2] = map.CoupSites[n].nuz; mvmul(RotMat[i], nu, nui); copy_rvec(x[p_pb[i].CoupAtoms[n][0]], ri); for(k=1; k<p_pb[i].ncoupatoms[n]; k++) { pbc_dx(&pbc, x[p_pb[i].CoupAtoms[n][k]], x[p_pb[i].CoupAtoms[n][0]], dx); svmul((1.0)/p_pb[i].ncoupatoms[n], dx, dx); rvec_inc(ri, dx); } // Now treat PBC: Take pbc_dx of ri from ref to get correct pbc-treated spatial displacement. pbc_dx(&pbc, ri, ref, dx); // Then add (without pbc-correction) this displacement to ref to get the pbc-corrected ri. // This treatment assures that the relative positions of all coupling sites are consistent // with one another within a single pbc image. for(k=0; k<3; k++) ri[k] = ref[k] + dx[k]; // Dipole moment components in e_o * Angstrom for(k=0; k<3; k++) dip[k] += (map.CoupSites[n].q*nui[k] + 10.0*map.CoupSites[n].dq*ri[k]); } for(k=0; k<3; k++) dipData[i][k] = prefac*dip[k]; } return 0; } real get_avg_tc_dip(t_pbc pbc, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, matrix *RotMat) { int i,n,k; rvec dx, ri, nu, nui; rvec dip; real prefac = 0.485498038384979; // sqrt( hbar / (2.0*wo) ) * ( 1.60217657e-19 ) / ( sqrt( 1.660538921e-27 ) * ( 3.33564e-30 ) ); // \sqrt( \hbar / 2*w_i ) * ( 1.60217657e-19 C/eo ) / ( sqrt( 1.660538921e-27 amu/kg) * ( 3.33564e-30 C*m/Debye ) ) real avg = 0.0; rvec ref; for(i=0; i<bonds; i++) { // We use CoupAtoms[0][0] as our spatial reference point. // All coordinates MUST be updated relative to this point; // otherwise PBC may give errors. copy_rvec(x[p_pb[i].CoupAtoms[0][0]], ref); for(k=0; k<3; k++) dip[k] = 0.0; for(n=0; n<map.ncoupsites; n++) { nu[0] = map.CoupSites[n].nux; nu[1] = map.CoupSites[n].nuy; nu[2] = map.CoupSites[n].nuz; mvmul(RotMat[i], nu, nui); copy_rvec(x[p_pb[i].CoupAtoms[n][0]], ri); for(k=1; k<p_pb[i].ncoupatoms[n]; k++) { pbc_dx(&pbc, x[p_pb[i].CoupAtoms[n][k]], x[p_pb[i].CoupAtoms[n][0]], dx); svmul((1.0)/p_pb[i].ncoupatoms[n], dx, dx); rvec_inc(ri, dx); } // Now treat PBC: Take pbc_dx of ri from ref to get correct pbc-treated spatial displacement. pbc_dx(&pbc, ri, ref, dx); // Then add (without pbc-correction) this displacement to ref to get the pbc-corrected ri. // This treatment assures that the relative positions of all coupling sites are consistent // with one another within a single pbc image. for(k=0; k<3; k++) ri[k] = ref[k] + dx[k]; // Dipole moment components in e_o * Angstrom for(k=0; k<3; k++) dip[k] += (map.CoupSites[n].q*nui[k] + 10.0*map.CoupSites[n].dq*ri[k]); } real val = 0.0; for(k=0; k<3; k++) val += prefac*prefac*dip[k]*dip[k]; avg += sqrt(val)/( (real) bonds ); } return avg; } int get_tcc_jansen(t_pbc pbc, t_amide_map map, rvec *x, t_protbond *p_pb, int bonds, matrix *RotMat, real ***coupData, int ncoup) { /* To get the value of 1/(4*pi*Eps0) used below, start with Eps0 in standard units: Eps0 = 8.85418e-12 F/m In what follows, curly brackets {} represent numerical values (i.e. a quantity minus its units). Let F = Farads, m = meters, C = coloumbs, J = Joules, cm-1 = wavenumbers, Ang = angstrom, eo = units of elementary charge. 
We'll need the fundamental constants Eps0 = 8.8541878176e-12 F/m h = 6.62606957e-34 J s c = 2.99792458e10 cm / s E0 = 1.602176565e-19 C and the conversion relations {X} m = {X*1e10} Ang {X} F = {X} C^2/J {X} J = {X/hc} cm-1 {X} C = {X/EO} eo Applying these in order, we get {X} F/m = {X/1e10} F/Ang = {( X )/( 1e10 )} ( C^2 )/( Ang J ) = {( X h c )/( 1e10 )} ( C^2 )/( Ang cm-1 ) = {( X h c )/( EO^2 1e10 )} ( eo^2 )/( Ang cm-1 ) So we have 4*pi*Eps0 = {( 4*pi*(8.8541878176e-12)*(6.62606957e-34)*(2.99792458e10) )/( (1e10)*(1.602176565e-19)^2 )} ( eo^2 )/( Ang cm-1 ) */ int ndxi[6], ndxj[6]; int i,j,n,m; real ONEOVER4PIEPS0 = 1.161409733881537e+05; // units of ( cm-1 * Ang )/( eo^2 ) rvec rij, nui, nuj; real d, invd, invd3, invd5, J; real amp = 0.028074; // units of Angstrom/( normal mode amplitude ) // Vector nu is the normal mode coordinate in units of the normal mode amplitude // just defined. nu*amp has units of Angstrom. rvec nu[6] = { { 0.0, 0.0, 0.0}, {-0.831, 0.105, 0.0}, {0.517, -0.047, 0.0}, {0.074, -0.036, 0.0}, {0.073, -0.133, 0.0}, {0.0, 0.0, 0.0} }; // units of elementary charge real q[6] = {0.11072, 0.37173, -0.53632, -0.48418, 0.24278, 0.29527}; real dq[6] = {0.01668, -0.02845, -0.01530, 0.01736, 0.00008, 0.00963}; for(i=0; i<bonds; i++) { ndxi[0] = p_pb[i].CAN; ndxi[1] = p_pb[i].C; ndxi[2] = p_pb[i].O; ndxi[3] = p_pb[i].N; ndxi[4] = p_pb[i].H; ndxi[5] = p_pb[i].CAC; for(j=0; j<i; j++) { // Make sure they're not nearest neighbors--otherwise, get division by zero if( (p_pb[i].CAN!=p_pb[j].CAC) && (p_pb[j].CAN!=p_pb[i].CAC) ) { ndxj[0] = p_pb[j].CAN; ndxj[1] = p_pb[j].C; ndxj[2] = p_pb[j].O; ndxj[3] = p_pb[j].N; ndxj[4] = p_pb[j].H; ndxj[5] = p_pb[j].CAC; J = 0.0; for(n=0; n<6; n++) { mvmul(RotMat[i], nu[n], nui); for(m=0; m<6; m++) { mvmul(RotMat[j], nu[m], nuj); pbc_dx(&pbc, x[ndxi[n]], x[ndxj[m]], rij); // points towards i bond svmul(10.0, rij, rij); d = norm(rij); invd = (1.0)/d; invd3 = invd*invd*invd; invd5 = invd3*invd*invd; J += ONEOVER4PIEPS0*(dq[n]*dq[m])*invd; J -= ONEOVER4PIEPS0*amp*amp*(3*q[n]*q[m])*iprod(nui,rij)*iprod(nuj,rij)*invd5; J -= ONEOVER4PIEPS0*( -dq[n]*q[m]*amp*iprod(nuj,rij) + q[n]*dq[m]*amp*iprod(nui,rij) - q[n]*q[m]*amp*amp*iprod(nui,nuj) )*invd3; } } coupData[i][j][0] = J; coupData[j][i][0] = J; } coupData[i][i][0] = 0.0; } } return 0; } int get_nnc(t_amide_map map, t_protbond *p_pb, int bonds, real **angleData, real ***coupData) { real Jnn; int b1, b2; int NN; for(b1=0; b1<bonds; b1++) { for(b2=0; b2<b1; b2++) { NN = 0; // Check if the N-terminal CA of b1 is the C-terminal CA of b2 // Otherwise, check if the N-terminal CA of b2 is the C-terminal CA of b1 if( ((p_pb[b1].dih_set_N*p_pb[b2].dih_set_C)==1) && (p_pb[b1].psiN[1]==p_pb[b2].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][0], angleData[b1][1], map.NNC, map.dimNNC[0], map.dimNNC[1]); NN = 1; } else if( ((p_pb[b1].dih_set_C*p_pb[b2].dih_set_N)==1) && (p_pb[b2].psiN[1]==p_pb[b1].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][2], angleData[b1][3], map.NNC, map.dimNNC[0], map.dimNNC[1]); NN = 1; } if(NN) { coupData[b1][b2][0] = Jnn; coupData[b2][b1][0] = Jnn; coupData[b1][b2][2] = Jnn; coupData[b2][b1][2] = Jnn; } } } return 1; } int get_dnnc(t_amide_map map, t_protbond *p_pb, int bonds, real **angleData, real ***coupData) { real Jnn; int b1, b2; int NN; for(b1=0; b1<bonds; b1++) { for(b2=0; b2<b1; b2++) { NN = 0; // Check if the N-terminal CA of b1 is the C-terminal CA of b2 // Otherwise, check if the N-terminal CA of b2 is the C-terminal CA of b1 if( ((p_pb[b1].dih_set_N*p_pb[b2].dih_set_C)==1) && 
(p_pb[b1].psiN[1]==p_pb[b2].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][0], angleData[b1][1], map.DNNC, map.dimDNNC[0], map.dimDNNC[1]); NN = 1; } else if( ((p_pb[b1].dih_set_C*p_pb[b2].dih_set_N)==1) && (p_pb[b2].psiN[1]==p_pb[b1].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][2], angleData[b1][3], map.DNNC, map.dimDNNC[0], map.dimDNNC[1]); NN = 1; } if(NN) { coupData[b1][b2][0] += Jnn; coupData[b2][b1][0] += Jnn; coupData[b1][b2][2] += Jnn; coupData[b2][b1][2] += Jnn; } } } return 1; } int open_info_file(FILE **p_fp, char* namebase, const int maxchar) { char infonm[maxchar]; if( (namebase!=NULL && (strlen(namebase)>0) )) { strcpy(infonm, namebase); if(namebase[strlen(namebase)-1]!='/') { strcat(infonm, "_"); } } else { infonm[0] = '\0'; } strcat(infonm, "info.txt"); *p_fp = fopen(infonm, "w"); if(*p_fp==NULL) { printf("Error opening output file for writing.\n"); return 0; } else return 1; } int open_spec_files(FILE **p_fp, int nfiles, char* namebase, const int maxchar) { int i; char sitenm[maxchar]; char hamnm[maxchar]; char dipxnm[maxchar]; char dipynm[maxchar]; char dipznm[maxchar]; if( (namebase!=NULL && (strlen(namebase)>0) )) { strcpy(sitenm, namebase); strcpy(hamnm, namebase); strcpy(dipxnm, namebase); strcpy(dipynm, namebase); strcpy(dipznm, namebase); if(namebase[strlen(namebase)-1]!='/') { strcat(sitenm, "_"); strcat(hamnm, "_"); strcat(dipxnm, "_"); strcat(dipynm, "_"); strcat(dipznm, "_"); } } else { sitenm[0] = '\0'; hamnm[0] = '\0'; dipxnm[0] = '\0'; dipynm[0] = '\0'; dipznm[0] = '\0'; } strcat(sitenm, "sites.txt"); strcat(hamnm, "ham.txt"); strcat(dipxnm, "dipx.txt"); strcat(dipynm, "dipy.txt"); strcat(dipznm, "dipz.txt"); p_fp[0] = fopen(sitenm, "w"); p_fp[1] = fopen(hamnm, "w"); p_fp[2] = fopen(dipxnm, "w"); p_fp[3] = fopen(dipynm, "w"); p_fp[4] = fopen(dipznm, "w"); for(i=0; i<nfiles; i++) { if(p_fp[i]==NULL) { printf("Error opening output file for writing.\n"); return 0; } } return 1; } int write_spec_data(FILE **p_fp, real ***coupData, real **freqData, rvec *dipData, int bonds) { int b1,b2; for(b1=0; b1<bonds; b1++) { fprintf(p_fp[0], "%10.6f\t", freqData[b1][0]); for(b2=0; b2<bonds; b2++) { if(b1==b2) fprintf(p_fp[1], "%10.6f\t", freqData[b1][0]); else fprintf(p_fp[1], "%10.6f\t", coupData[b1][b2][0]); } fprintf(p_fp[2], "%10.6f\t", dipData[b1][0]); fprintf(p_fp[3], "%10.6f\t", dipData[b1][1]); fprintf(p_fp[4], "%10.6f\t", dipData[b1][2]); } fprintf(p_fp[0], "\n"); fprintf(p_fp[1], "\n"); fprintf(p_fp[2], "\n"); fprintf(p_fp[3], "\n"); fprintf(p_fp[4], "\n"); return 1; } int close_spec_files(FILE **p_fp, int nfiles) { int i; for(i=0; i<nfiles; i++) fclose(p_fp[i]); return 1; } int open_elec_files(FILE **elecfp[10], char *outname, t_amide_map map, const int maxchar) { int i,j,k,l; char fname[maxchar]; char COMP[10][6] = { "P", "Ex", "Ey", "Ez", "Gxx", "Gxy", "Gxz", "Gyy", "Gyz", "Gzz" }; for(j=0; j<10; j++) { if(map.elec_used[j]) { elecfp[j] = (FILE**) malloc(map.nsites*sizeof(FILE*)); if(elecfp[j]==NULL) { printf("Error opening electrostatic output files. 
Please check input.\n");
				int jx;
				for(jx=0; jx<j; jx++) free(elecfp[jx]);
				return 0;
			}
		}
		else elecfp[j] = NULL;
	}
	for(j=0; j<10; j++) {
		for(i=0; i<map.nsites; i++) {
			if(map.elec_used[j]) {
				fname[0] = '\0';
				if( (outname!=NULL) && (strlen(outname)>0) ) {
					strcpy(fname, outname);
					if(outname[strlen(outname)-1]!='/') strcat(fname, "_");
				}
				strcat(fname, COMP[j]);
				for(k=0; k<map.MapSites[i].natoms; k++) {
					if(k>0) strcat(fname, "_AND");
					for(l=0; l<map.MapSites[i].AtomPaths[k].length; l++) {
						strcat(fname, "_");
						strcat(fname, map.MapSites[i].AtomPaths[k].Path[l]);
					}
				}
				strcat(fname, ".txt");
				elecfp[j][i] = fopen(fname, "w");
				if(elecfp[j][i]==NULL) {
					printf("Error opening electrostatic output files. Please check input.\n");
					int ix, jx;
					for(ix=0; ix<i; ix++) fclose(elecfp[j][ix]);
					free(elecfp[j]);
					for(jx=0; jx<j; jx++) {
						// Unused components were never allocated; skip them.
						if(elecfp[jx]!=NULL) {
							for(ix=0; ix<map.nsites; ix++) fclose(elecfp[jx][ix]);
							free(elecfp[jx]);
						}
					}
					return 0;
				}
			}
		}
	}
	return 1;
}

int open_angle_files(FILE *anglefp[4], char *outname, const int maxchar) {
	int i;
	char fname[maxchar];
	char NAME[4][6] = {"phiN", "psiN", "phiC", "psiC"};
	for(i=0; i<4; i++) {
		fname[0] = '\0';
		if( (outname!=NULL) && (strlen(outname)>0) ) {
			strcpy(fname, outname);
			if(outname[strlen(outname)-1]!='/') strcat(fname, "_");
		}
		strcat(fname, NAME[i]);
		strcat(fname, ".txt");
		anglefp[i] = fopen(fname, "w");
		if(anglefp[i]==NULL) {
			printf("Error opening angle output files. Please check input.\n");
			int ix;
			for(ix=0; ix<i; ix++) fclose(anglefp[ix]);
			return 0;
		}
	}
	return 1;
}

int write_elec_dat(FILE **elecfp[10], t_amide_map map, real ***elecData, int bonds, int nelec) {
	int b,i,j;
	for(i=0; i<nelec; i++) {
		if(map.elec_used[i]) {
			for(j=0; j<map.nsites; j++) {
				for(b=0; b<bonds; b++) {
					fprintf(elecfp[i][j], "%6.10f\t", elecData[b][j][i]);
				}
				fprintf(elecfp[i][j], "\n");
			}
		}
	}
	return 1;
}

int write_angle_dat(FILE *anglefp[4], real **angleData, int bonds, int nangles) {
	int b,i;
	for(i=0; i<nangles; i++) {
		for(b=0; b<bonds; b++) fprintf(anglefp[i], "%6.10f\t", angleData[b][i]);
		fprintf(anglefp[i], "\n");
	}
	return 1;
}

int close_elec_files(FILE **elecfp[10], t_amide_map map) {
	int i,j;
	for(j=0; j<10; j++) {
		if(elecfp[j]!=NULL) {
			for(i=0; i<map.nsites; i++) if(elecfp[j][i]!=NULL) fclose(elecfp[j][i]);
			free(elecfp[j]);
		}
	}
	return 1;
}

int close_angle_files(FILE *anglefp[4]) {
	int i;
	for(i=0; i<4; i++) if(anglefp[i]!=NULL) fclose(anglefp[i]);
	return 1;
}

/********************************************************************************
*				   Main code.					*
********************************************************************************/

int main ( int argc, char * argv[] ) {

	/************************************************************************
	 This section deals with command line arguments. Most of the action
	 happens in the parse_common_args() command, which checks what command
	 line arguments were provided and initializes the corresponding variables
	**************************************************************************/

	// Variables set by command line arguments
	char* mapfile = NULL;
	char* promapfile = NULL;
	char* chargefile = NULL;
	char* outname = NULL;

	// cjfeng 08/27/2016
	// Added chunk command to selectively choose portion of bath
	// to include in electrostatics.
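	// Example: "-chunk [0-99;500-599]" restricts the electrostatic sums to
	// atoms 0..99 and 500..599 (zero-based, inclusive bounds); atoms outside
	// every requested range are simply skipped, exactly as excluded atoms are.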
char* chunkstr = ""; int nthreads = 1; int print_elec = 0; int print_angles = 0; int nnc, nnfsn, nnfsc, dnnc; real cutoff = 1000.0; real osc = -1; int i; int verbose = 0; // A list of command line file flags t_filenm fnm[] = { { efTRX, "-f", NULL, ffREAD }, { efTPX, NULL, NULL, ffREAD } }; // A list of additional command line arguments t_pargs pa [] = { {"-mapfile", FALSE, etSTR, {&mapfile}, "Complete map file, specifying all parameters."}, {"-chargefile", FALSE, etSTR, {&chargefile}, "Complete charge file, specifying all charges."}, {"-promapfile", FALSE, etSTR, {&promapfile}, "Complete map file, specifying all parameters."}, {"-outname", FALSE, etSTR, {&outname}, "File name base for output"}, {"-nt", FALSE, etINT, {&nthreads}, "Number of threads"}, {"-print_elec", FALSE, etBOOL, {&print_elec}, "Print electrostatic values"}, {"-print_angles", FALSE, etBOOL, {&print_angles}, "Print dihedral angles"}, {"-cutoff", FALSE, etREAL, {&cutoff}, "Cutoff distance (nm)"}, {"-osc", FALSE, etREAL, {&osc}, "Oscillator strength (Debye^2). If positive, used to normalize dipole moments."}, {"-verbose", FALSE, etBOOL, {&verbose}, "Verbose (print lots of info)"}, {"-chunk", FALSE, etSTR, {&chunkstr}, "Chunk range for selecting part of the system to be included for electrostatic frequency shift, format: [a-b;...;c-d]. Indexing begins at zero."} }; // The program description const char *desc[] = {""}; // A description of known bugs const char *bugs[] = {""}; // Here we parse the command line arguments and make them available to call within the program output_env_t oenv; CopyRight(stderr, argv[0]); parse_common_args(&argc, argv, PCA_CAN_TIME | PCA_BE_NICE, asize(fnm), fnm, asize(pa), pa, asize(desc), desc, asize(bugs), bugs, &oenv); /********************************************************** * We first check if only part of the bath is included for * * computing electrostatic frequency shift. * * cjfeng 08/27/2016 * **********************************************************/ int START[1000]; // Starting index for computing electrostatics int STOP[1000]; // Ending index for computing electrostatics int nchunks = 0; // Number of chunks if( strlen(chunkstr)!=0 ) { int i, j; printf("Parsing chunk request %s...\n", chunkstr); if( (chunkstr[0]!='[') || (chunkstr[strlen(chunkstr)-1]!=']') ) { printf("Error parsing chunk string. Format: [a-b;c-d;...;e-f]\n"); return 0; } else { // Changed string processing code. MER 10/04/2014 char chunk[1000]; for(i=0; i<strlen(chunkstr); i++) { if( (chunkstr[i]==';') || (chunkstr[i]=='[') ) { for(j=i+1; j<strlen(chunkstr); j++) { if( (chunkstr[j]==';') || (chunkstr[j]==']') ) { strncpy(chunk, chunkstr+i+1, j-i-1); chunk[j-i-1] = '\0'; break; } } int start, stop; if( (j>=strlen(chunkstr)) || (sscanf(chunk, "%d-%d", &start, &stop)!=2) ) { printf("Error parsing input segment %s\n", chunk); return 0; } else { START[nchunks] = start; STOP[nchunks] = stop; nchunks++; } } } } } /************************************************************************ Now we check the input. First we check and set the mapping parameters, then the (first frame of the) provided structure file. **************************************************************************/ // First load map parameters t_amide_map map; t_amide_map promap; if( (mapfile==NULL) ) { printf("Please specify a map using the -mapfile flag.\n"); return 0; } // read_amide_map opens the provided map file and stores all data in the map structure if(!read_amide_map(mapfile, &map, verbose)) { printf("Error reading specified map file. 
Please check input.\n");
		return 0;
	}
	printf("\nSuccessfully loaded all mapping parameters from input file %s. \n", mapfile);
	if((promapfile!=NULL)) {
		if(!read_amide_map(promapfile, &promap, verbose)) {
			printf("Error reading specified proline map file. Please check input.\n");
			return 0;
		}
		printf("\nSuccessfully loaded all mapping parameters from proline map file %s. \n", promapfile);
		printf("\nWarning: Currently only site energy shifts are used from the proline map file. Coupling parameters (both through-space and through-bond) are taken from the default Amide I map.\n");
	}
	else {
		promap.nsites = 0;
	}

	// Check whether nearest-neighbor coupling is used.
	if((map.dimNNC[0]*map.dimNNC[1])>0) nnc = 1;
	else nnc = 0;
	if((map.dimDNNC[0]*map.dimDNNC[1])>0) dnnc = 1;
	else dnnc = 0;

	// Check whether nearest-neighbor frequency shifts are used.
	if((map.dimNNFSN[0]*map.dimNNFSN[1])>0) nnfsn = 1;
	else nnfsn = 0;
	if((map.dimNNFSC[0]*map.dimNNFSC[1])>0) nnfsc = 1;
	else nnfsc = 0;

	if(verbose) {
		printf("Coupling model: %s\n", map.couptype);
		if(nnc) printf("Using nearest-neighbor coupling map.\n");
		if(dnnc) printf("Using nearest-neighbor coupling difference map.\n");
		if((nnc+dnnc)==0) printf("No nearest-neighbor coupling map in use.\n");
		if(nnfsn) printf("Using N-terminal nearest-neighbor frequency-shift map.\n");
		if(nnfsc) printf("Using C-terminal nearest-neighbor frequency-shift map.\n");
		if((nnfsn+nnfsc)==0) printf("No nearest-neighbor frequency shifts in use.\n");
	}
	if(map.dipset) {
		if(verbose) printf("Using dipole vector provided in map file\n");
	}
	else if(map.tcset) {
		if(verbose) printf("Using TCC parameters provided in map file to set dipole moment.\n");
	}
	else {
		printf("Error! No method specified for calculating dipole moments. \n");
		printf("Please provide either a dipole moment (DIP:) entry or TCC parameters.\n");
		free_amide_map(map);
		return 0;
	}

	// Read atomic charges.
	int ffcharge = 0;
	int nres;
	t_residue *resarray;
	int nrep;
	char **reparray[2];
	if( (chargefile!=NULL) ) {
		ffcharge = 0;
		if(read_charge_map(chargefile, &resarray, &nres, reparray, &nrep, verbose)!=0) {
			printf("Error reading charge file. \n");
			free_amide_map(map);
			free_amide_map(promap);
			return 0;
		}
	}
	else {
		// If no chargefile is specified, we use force-field charges.
		// We still need to manually enter a replacement name array since
		// some names (particularly HN for the CHARMM27 force field) will
		// often need to be swapped.
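		// The default table below maps, e.g., CHARMM's "HN" to "H" and the
		// terminal-oxygen variants "O1"/"OT1" to "OT", so that the bond-finding
		// code can assume a single naming convention; swap_atomnames() applies
		// the replacements to the topology in place further below.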
int error = 0; int j; ffcharge = 1; int nchars = 20; nrep = 8; nres = 0; resarray = NULL; reparray[0] = NULL; reparray[1] = NULL; reparray[0] = (char**) malloc(nrep*sizeof(char*)); reparray[1] = (char**) malloc(nrep*sizeof(char*)); if(reparray[0]==NULL) error = 1; if(reparray[1]==NULL) error = 1; for(i=0; i<nrep; i++) { if(!error) { reparray[0][i] = (char*) malloc(nchars*sizeof(char)); if(reparray[0][i]==NULL) { for(j=0; j<i; j++) free(reparray[0][j]); error = 2; } } else if(reparray[0]!=NULL) reparray[0][i] = NULL; if(!error) { reparray[1][i] = (char*) malloc(nchars*sizeof(char)); if(reparray[1][i]==NULL) { for(j=0; j<nrep; j++) free(reparray[1][j]); for(j=0; j<i; j++) free(reparray[1][j]); error = 2; } } else if(reparray[1]!=NULL) reparray[1][i] = NULL; } if(!error) { char *DEFREP1[] = { "HN", "O1", "O2", "OT1", "OT2", "HT2", "HT1", "HT2" }; char *DEFREP2[] = { "H", "OT", "O", "OT", "O", "HO", "H1", "H2"}; for(i=0; i<nrep; i++) { strcpy(reparray[0][i], DEFREP1[i]); strcpy(reparray[1][i], DEFREP2[i]); } } if(error) { if(reparray[0]!=NULL) free(reparray[0]); if(reparray[1]!=NULL) free(reparray[1]); free_amide_map(map); free_amide_map(promap); return 0; } } // Now structure files. First topology, then first frame of trajectory t_topology top; t_inputrec ir; t_trxstatus *status; int trrStatus = 1; real t; rvec *x; matrix box; int natoms; int ePBC; t_pbc pbc; // read_tpx_top() reads the topology file and stores the values in the t_topology type top. The t_inputrec type ir stores information // on the box and periodic boundary conditions. The matrix box contains the vectors defining the periodic box. ePBC = read_tpx_top(ftp2fn(efTPX,asize(fnm),fnm), &ir, box, &natoms, NULL, NULL, NULL, &top); // set_pbc initializes a t_pbc type variable pbc to be used later on in calls to, e.g. pbc_dx for calculating pbc-corrected distances. set_pbc(&pbc, ePBC, box); // read_first_x opens a trajectory file, allocates memory for one frame of coordinates (the rvec array x), // and reads the first frame from the trajectory. natoms = read_first_x(oenv, &status, ftp2fn(efTRX,asize(fnm),fnm), &t, &x, box); // Check to see if the number of atoms in topology and trajectory files match. if ( natoms > top.atoms.nr ) gmx_fatal(FARGS,"Topology (%d atoms) does not match trajectory (%d atoms)", top.atoms.nr, natoms); /************************************************************************ Now parse the protein structure to assign charges and identify amide bonds. **************************************************************************/ int error = 0; t_protbond *p_pb = NULL; // Protein bond array int b; // b is a counter, bonds is the number of bonds. int bonds = 0; t_protbond *p_ppb = NULL; // Proline protein bond array int nPro = 0; // First, make any atom name replacements necessary. if(!error) swap_atomnames(top, reparray, nrep, verbose); // Now reassign atomic charges. if( (!error) && (!ffcharge) ) if(identify_residues(&pbc, top, x, resarray, nres, verbose)!=0) error = 1; // And look for bonds. 
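	// find_bonds() locates the backbone amide units in the (renamed) topology,
	// allocating and filling one t_protbond per bond; its return value is the
	// bond count, and anything less than one is treated as fatal below.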
if(!error) { bonds = find_bonds(&pbc, top, x, &p_pb); printf("\nLocated %d amide bonds in the peptide chain\n", bonds); if(bonds<1) { printf("Error: No amide bonds located.\n"); error = 1; } else { for(b=0; b<bonds; b++) { if(p_pb[b].isPro) nPro++; } } printf("\nLocated %d proline bonds in the peptide chain\n", nPro); if(promap.nsites!=0) { p_ppb = (t_protbond*) malloc( sizeof(t_protbond)*nPro ); if(p_ppb==NULL) { printf("Error allocating memory for proline bonds.\n"); error = 1; } int count = 0; for(b=0; b<bonds; b++) { if(p_pb[b].isPro) { initialize_bond(&pbc, top, x, &p_ppb[count], p_pb[b].N, p_pb[b].H, p_pb[b].C, p_pb[b].O, p_pb[b].isPro); count++; } } } } // Next identify mapping sites for each bond if(!error) if(!find_map_sites(pbc, top, x, map, p_pb, bonds)) error = 1; if((!error) && (promap.nsites!=0)) if(!find_map_sites(pbc, top, x, promap, p_ppb, nPro)) error = 1; // And identify coupling sites for each bond // For now, coupling sites are taken ONLY from the main map file. // Coupling sites in the proline map file are ignored. if(!error) if(!find_coup_sites(pbc, top, x, map, p_pb, bonds)) error = 1; if( (!error) && (promap.nsites!=0) ) if(!find_coup_sites(pbc, top, x, map, p_ppb, nPro)) error = 1; // And identify excluded atoms for electrostatic calculation if(!error) if(!find_excluded_atoms(pbc, top, x, map, p_pb, bonds)) error = 1; if((!error) && (promap.nsites!=0)) if(!find_excluded_atoms(pbc, top, x, promap, p_ppb, nPro)) error = 1; // If anything went wrong, surrender with dignity. if(error) { for(b=0; b<bonds; b++) free_bond(p_pb[b]); if(p_pb!=NULL) free(p_pb); if(p_ppb!=NULL) free(p_ppb); if(reparray[0]!=NULL) for(i=0; i<nrep; i++) if(reparray[0][i]!=NULL) free(reparray[0][i]); if(reparray[1]!=NULL) for(i=0; i<nrep; i++) if(reparray[1][i]!=NULL) free(reparray[1][i]); if(reparray[0]!=NULL) free(reparray[0]); if(reparray[1]!=NULL) free(reparray[1]); free_resarray(resarray, nres); free_amide_map(map); free_amide_map(promap); return 0; } // Tell the user what we found. if(verbose) for(b=0; b<bonds; b++) print_bond(p_pb[b], top); if(verbose) for(b=0; b<nPro; b++) print_bond(p_ppb[b], top); /************************************************************************ Allocate memory for data arrays and open output files. 
	**************************************************************************/
	int arraysset = 0;
	int specfilesopen = 0;
	int elecfilesopen = 0;
	int anglefilesopen = 0;

	real ***elecData;
	real ***ProElecData;
	const int nelec = 10;
	real **angleData;
	real **ProAngleData;
	const int nangles = 4;
	real **freqData;
	real **ProFreqData;
	const int nfreq = 3;
	real ***coupData;
	const int ncoup = 3;
	rvec **coordData;
	rvec **ProCoordData;
	rvec *dipData = NULL;
	rvec *centData = NULL;
	matrix *RotMat = NULL;
	matrix *ProRotMat = NULL;
	if(!set_arrays(&elecData, &ProElecData, nelec, &angleData, &ProAngleData, nangles, &freqData, &ProFreqData, nfreq, &coupData, ncoup, &coordData, &ProCoordData, &dipData, &centData, &RotMat, &ProRotMat, bonds, map.nsites, nPro, promap.nsites)) {
		printf("Error allocating memory for data arrays\n");
		error = 1;
	}
	else arraysset = 1;

	const int maxchar = 1024;
	FILE *infofp = NULL;
	int infofileopen = 0;
	if(!open_info_file(&infofp,outname,maxchar)) error = 1;
	else infofileopen = 1;
	FILE *specfp[5];
	if(!open_spec_files(specfp,5,outname,maxchar)) error = 1;
	else specfilesopen = 1;
	FILE **elecfp[10];
	if( (print_elec) && (!open_elec_files(elecfp, outname, map, maxchar))) error = 1;
	else if(print_elec) elecfilesopen = 1;
	FILE *anglefp[4];
	if( (print_angles) && (!open_angle_files(anglefp, outname, maxchar))) error = 1;
	else if(print_angles) anglefilesopen = 1;

	// Write the bond list only if the info file actually opened.
	if(infofileopen==1) {
		fprintf(infofp, "BONDS: %d\n", bonds);
		for(b=0; b<bonds; b++) fprintf(infofp, "%s %d %s %d\n", p_pb[b].resname1, p_pb[b].resnum1, p_pb[b].resname2, p_pb[b].resnum2);
	}

	if( (strcmp(map.couptype, "pdc")!=0) && (strcmp(map.couptype, "tcc")!=0) ) {
		printf("Error: Please specify a coupling model (pdc or tcc).\n");
		error = 1;
	}

	if(error) {
		if(anglefilesopen==1) close_angle_files(anglefp);
		if(elecfilesopen==1) close_elec_files(elecfp, map);
		if(infofileopen==1) fclose(infofp);
		if(specfilesopen==1) close_spec_files(specfp, 5);
		if(arraysset==1) unset_arrays(elecData, ProElecData, nelec, angleData, ProAngleData, nangles, freqData, ProFreqData, nfreq, coupData, ncoup, coordData, ProCoordData, dipData, centData, RotMat, ProRotMat, bonds, map.nsites, nPro, promap.nsites);
		if(reparray[0]!=NULL) for(i=0; i<nrep; i++) if(reparray[0][i]!=NULL) free(reparray[0][i]);
		if(reparray[1]!=NULL) for(i=0; i<nrep; i++) if(reparray[1][i]!=NULL) free(reparray[1][i]);
		if(reparray[0]!=NULL) free(reparray[0]);
		if(reparray[1]!=NULL) free(reparray[1]);
		free_resarray(resarray, nres);
		free_amide_map(map);
		free_amide_map(promap);
		for(b=0; b<bonds; b++) free_bond(p_pb[b]);
		if(p_pb!=NULL) free(p_pb);
		if(p_ppb!=NULL) {
			for(b=0; b<nPro; b++) free_bond(p_ppb[b]);
			free(p_ppb);
		}
		return 0;
	}

	/************************************************************************
	 Step through trajectory and calculate parameters.
	**************************************************************************/
	real dipstrength = 0.0;
	do {
		// Read coordinates for all amide bonds and set molecular frame axes/rotation matrix.
		get_coordinates(pbc, top, x, map, p_pb, bonds, RotMat, coordData);
		if(promap.nsites!=0) get_coordinates(pbc, top, x, promap, p_ppb, nPro, ProRotMat, ProCoordData);

		// Calculate electrostatic values around each bond.
		get_electrostatics(pbc, top, x, map, p_pb, RotMat, bonds, coordData, elecData, nelec, nthreads, cutoff, nchunks, START, STOP);
		if(promap.nsites!=0) get_electrostatics(pbc, top, x, promap, p_ppb, ProRotMat, nPro, ProCoordData, ProElecData, nelec, nthreads, cutoff, nchunks, START, STOP);

		// Calculate dihedral angles for each bond.
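		// get_angles() reports backbone dihedrals in degrees and stores the
		// sentinel value -1000.0 for bonds whose N- or C-terminal dihedral
		// atoms are not set; get_NN_val() maps that sentinel outside its grid
		// and returns 0.0, so terminal bonds receive no nearest-neighbor shift.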
		get_angles(&pbc, top, x, map, p_pb, bonds, angleData, nangles);
		if(promap.nsites!=0) get_angles(&pbc, top, x, promap, p_ppb, nPro, ProAngleData, nangles);

		// Calculate frequencies.
		get_freq(map, p_pb, bonds, elecData, nelec, angleData, nangles, freqData, nfreq);
		if(promap.nsites!=0) get_freq(promap, p_ppb, nPro, ProElecData, nelec, ProAngleData, nangles, ProFreqData, nfreq);

		// Graft in proline frequencies
		if(promap.nsites!=0) {
			for(b=0; b<bonds; b++) {
				if(p_pb[b].isPro) {
					freqData[b][0] = ProFreqData[p_pb[b].isPro-1][0];
					freqData[b][1] = ProFreqData[p_pb[b].isPro-1][1];
				}
			}
		}

		// Dipole strength setup runs only once (dipstrength==0.0 on the first frame).
		if( (osc>0) && (map.dipset) && (dipstrength==0.0) ) {
			dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]);
			printf("Average dipole strength: %6.4f Debye\n", dipstrength);
			printf("Re-scaling map dipole moment.\n");
			for(i=0; i<3; i++) map.dip[i] *= (sqrt(osc)/dipstrength);
			dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]);
			printf("Re-scaled average dipole strength: %6.4f Debye\n", dipstrength);
		} else if( (osc<0) && (map.dipset) && (dipstrength==0.0) ) {
			real defosc = 0.276*0.276;
			dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]);
			for(i=0; i<3; i++) map.dip[i] *= (sqrt(defosc)/dipstrength);
			dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]);
			printf("Assigning average dipole strength: %6.4f Debye\n", dipstrength);
		} else if( (osc<0) && (!map.dipset) && (dipstrength==0.0) ) {
			dipstrength = get_avg_tc_dip(pbc, x, map, p_pb, bonds, RotMat);
			printf("Using TCC-defined dipole strength: %6.4f Debye\n", dipstrength);
		} else if( (osc>0) && (map.tcset) && (dipstrength==0.0) ) {
			dipstrength = get_avg_tc_dip(pbc, x, map, p_pb, bonds, RotMat);
			printf("Input dipole strength: %6.4f Debye\n", dipstrength);
			printf("Re-scaling map charges and charge fluxes.\n");
			for(i=0; i<map.ncoupsites; i++) map.CoupSites[i].q *= (sqrt(osc)/dipstrength);
			for(i=0; i<map.ncoupsites; i++) map.CoupSites[i].dq *= (sqrt(osc)/dipstrength);
			dipstrength = get_avg_tc_dip(pbc, x, map, p_pb, bonds, RotMat);
			printf("Re-scaled dipole strength: %6.4f Debye\n", dipstrength);
		}

		// If a dipole moment was specified, use it.
		if(map.dipset) get_dip(pbc, x, map, p_pb, bonds, elecData, nelec, RotMat, dipData, centData);
		// Otherwise, try to calculate from TCC parameters.
		else if(map.tcset) get_tc_dip(pbc, x, map, p_pb, bonds, RotMat, dipData, centData);

		// If PDC was specified, calculate PDC couplings.
		if(!strcmp(map.couptype, "pdc")) if(get_pdc(pbc, p_pb, bonds, dipData, centData, coupData, ncoup)) error = 1;
		// If TCC was specified, calculate TCC couplings.
		if(!strcmp(map.couptype, "tcc")) if(get_tcc(pbc, map, x, p_pb, bonds, RotMat, coupData, ncoup)) error = 1;
		//if(!strcmp(map.couptype, "tcc")) if(get_tcc_jansen(pbc, map, x, p_pb, bonds, RotMat, coupData, ncoup)) error = 1;

		// If using NNC map, get nearest-neighbor couplings.
		if(nnc) get_nnc(map, p_pb, bonds, angleData, coupData);
		if(dnnc) get_dnnc(map, p_pb, bonds, angleData, coupData);

		// Write data to files.
		write_spec_data(specfp, coupData, freqData, dipData, bonds);
		// If saving electrostatic data, write it to file now.
		if(print_elec) write_elec_dat(elecfp, map, elecData, bonds, nelec);
		// If saving angle data, write it to file now.
		if(print_angles) write_angle_dat(anglefp, angleData, bonds, nangles);

		// If we ran into an error, don't read any more frames.
		// Otherwise, continue on toward victory!
		if(error) trrStatus = 0;
		else trrStatus = read_next_x(oenv, status, &t, natoms, x, box);
		set_pbc(&pbc, ePBC, box);	// Re-set pbc using new box
	} while(trrStatus);

	if(anglefilesopen==1) close_angle_files(anglefp);
	if(elecfilesopen==1) close_elec_files(elecfp, map);
	if(infofileopen==1) fclose(infofp);
	if(specfilesopen==1) close_spec_files(specfp, 5);
	if(arraysset==1) unset_arrays(elecData, ProElecData, nelec, angleData, ProAngleData, nangles, freqData, ProFreqData, nfreq, coupData, ncoup, coordData, ProCoordData, dipData, centData, RotMat, ProRotMat, bonds, map.nsites, nPro, promap.nsites);
	if(reparray[0]!=NULL) for(i=0; i<nrep; i++) if(reparray[0][i]!=NULL) free(reparray[0][i]);
	if(reparray[1]!=NULL) for(i=0; i<nrep; i++) if(reparray[1][i]!=NULL) free(reparray[1][i]);
	if(reparray[0]!=NULL) free(reparray[0]);
	if(reparray[1]!=NULL) free(reparray[1]);
	free_resarray(resarray, nres);
	free_amide_map(map);
	free_amide_map(promap);
	for(b=0; b<bonds; b++) free_bond(p_pb[b]);
	if(p_pb!=NULL) free(p_pb);
	if(p_ppb!=NULL) free(p_ppb);
	return 0;
}
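/* ------------------------------------------------------------------------
 * Illustrative sketch only, NOT the actual get_pdc() implementation: the
 * point-dipole coupling (PDC) between two amide I oscillators is the
 * classical dipole-dipole interaction
 *
 *     J = A * [ mu_i . mu_j - 3 (mu_i . rhat)(mu_j . rhat) ] / r^3
 *
 * where A is 1/(4*pi*eps0) in spectroscopic units, roughly 5034
 * cm^-1 * Angstrom^3 / Debye^2 for dipoles in Debye and r in Angstrom.
 * The helper below uses plain 3-vectors for clarity; the real code works
 * with GROMACS rvec/pbc types, and the name pdc_coupling is hypothetical.
 * ------------------------------------------------------------------------ */
static double pdc_coupling(const double mu_i[3], const double mu_j[3], const double r_ij[3])
{
	const double A = 5034.0;	// assumed units: cm^-1 * Angstrom^3 / Debye^2
	double r2 = r_ij[0]*r_ij[0] + r_ij[1]*r_ij[1] + r_ij[2]*r_ij[2];
	double r  = sqrt(r2);
	double dot_ij = mu_i[0]*mu_j[0] + mu_i[1]*mu_j[1] + mu_i[2]*mu_j[2];
	double dot_ir = (mu_i[0]*r_ij[0] + mu_i[1]*r_ij[1] + mu_i[2]*r_ij[2]) / r;
	double dot_jr = (mu_j[0]*r_ij[0] + mu_j[1]*r_ij[1] + mu_j[2]*r_ij[2]) / r;
	return A * ( dot_ij - 3.0*dot_ir*dot_jr ) / (r2*r);
}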
dz1z2.c
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <time.h>
# include <omp.h>
#include "common.h"

int main ( int argc, char *argv[] );
double f ( double x );
int sequential ( int argc, char *argv[], double *result, double *time );
int parallel ( int argc, char *argv[], double *result, double *time );

double f ( double x )
{
  double pi = 3.141592653589793;
  double value;

  value = 50.0 / ( pi * ( 2500.0 * x * x + 1.0 ) );

  return value;
}

int sequential ( int argc, char *argv[], double *result, double *time )
{
  double a;
  double b;
  double error;
  double exact = 0.49936338107645674464;
  int i;
  int n;
  double total;
  double wtime;
  double x;

  if (argc != 4) {
    n = 10000000;
    a = 0.0;
    b = 10.0;
  }
  else {
    n = atoi(argv[1]);
    // The endpoints are doubles, so parse them with atof;
    // atoi would silently truncate fractional bounds.
    a = atof(argv[2]);
    b = atof(argv[3]);
  }

  printf ( "\n" );
  printf ( "QUAD sequential:\n" );
  printf ( " Estimate the integral of f(x) from A to B.\n" );
  printf ( " f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n" );
  printf ( "\n" );
  printf ( " A = %f\n", a );
  printf ( " B = %f\n", b );
  printf ( " N = %d\n", n );
  printf ( " Exact = %24.16f\n", exact );

  wtime = omp_get_wtime ( );

  total = 0.0;
  for ( i = 0; i < n; i++ )
  {
    x = ( ( double ) ( n - i - 1 ) * a + ( double ) ( i ) * b ) / ( double ) ( n - 1 );
    total = total + f ( x );
  }

  wtime = omp_get_wtime ( ) - wtime;

  total = ( b - a ) * total / ( double ) n;
  error = fabs ( total - exact );

  printf ( "\n" );
  printf ( " Estimate = %24.16f\n", total );
  *result = total;
  printf ( " Error = %e\n", error );
  printf ( " Time = %f\n", wtime );
  *time = wtime;
  printf ( "\n" );
  printf ( " Normal end of execution.\n" );
  printf ( "\n" );

  return 0;
}

int parallel ( int argc, char *argv[], double *result, double *time )
{
  double a;
  double b;
  double error;
  double exact = 0.49936338107645674464;
  int i;
  int n;
  double total;
  double wtime;
  double x;

  if (argc != 4) {
    n = 10000000;
    a = 0.0;
    b = 10.0;
  }
  else {
    n = atoi(argv[1]);
    a = atof(argv[2]);
    b = atof(argv[3]);
  }

  printf ( "\n" );
  printf ( "QUAD parallel:\n" );
  printf ( " Estimate the integral of f(x) from A to B.\n" );
  printf ( " f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n" );
  printf ( "\n" );
  printf ( " A = %f\n", a );
  printf ( " B = %f\n", b );
  printf ( " N = %d\n", n );
  printf ( " Exact = %24.16f\n", exact );

  wtime = omp_get_wtime ( );

  total = 0.0;
  // x is private per iteration; total is combined across threads
  // with a sum reduction. The loop index i is private by default.
  #pragma omp parallel for \
    private(x) \
    reduction(+:total)
  for ( i = 0; i < n; i++ )
  {
    x = ( ( double ) ( n - i - 1 ) * a + ( double ) ( i ) * b ) / ( double ) ( n - 1 );
    total = total + f ( x );
  }

  wtime = omp_get_wtime ( ) - wtime;

  total = ( b - a ) * total / ( double ) n;
  error = fabs ( total - exact );

  printf ( "\n" );
  printf ( " Estimate = %24.16f\n", total );
  *result = total;
  printf ( " Error = %e\n", error );
  printf ( " Time = %f\n", wtime );
  *time = wtime;
  printf ( "\n" );
  printf ( " Normal end of execution.\n" );
  printf ( "\n" );

  return 0;
}

int main ( int argc, char *argv[] )
{
  double sequential_result, parallel_result, sequential_time, parallel_time;
  int err;

  err = parallel(argc, argv, &parallel_result, &parallel_time);
  if (err) {
    return err;
  }

  err = sequential(argc, argv, &sequential_result, &sequential_time);
  if (err) {
    return err;
  }

  finish_1(sequential_result, parallel_result, sequential_time, parallel_time);

  return 0;
}
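/* Usage sketch (assumed, not taken from the source): with any OpenMP-capable
 * compiler, e.g.
 *
 *   gcc -fopenmp dz1z2.c -o dz1z2 -lm
 *   ./dz1z2 10000000 0.0 10.0      # arguments: N A B
 *
 * With no arguments (or the wrong count) the defaults N=10000000, A=0, B=10
 * are used. finish_1() comes from common.h and is presumably where the
 * sequential and parallel results and timings are compared. */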