3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four problem-size arguments are required; the original code left
   * Nx/Ny/Nz/Nt uninitialized when any of them was missing. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1]) + 2;
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***) * 2);
  A[0] = (double ***) malloc(sizeof(double**) * Nz);
  A[1] = (double ***) malloc(sizeof(double**) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*) * Ny);
    A[1][i] = (double**) malloc(sizeof(double*) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize both buffers, including the boundary cells the stencil reads
  // (the original started at index 1 and never touched A[1])
  // srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32)),ceild(16*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(4*t1+Nx+5,32)),floord(8*t2+Nx+4,32)),floord(16*t3+Nx+12,32)),floord(8*t1-8*t2+Nz+Nx+3,32));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),32*t4+30),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[(t5+1)%2][-t5+t6][-t5+t7][-t5+t8] =
                          alpha * A[t5%2][-t5+t6][-t5+t7][-t5+t8]
                        + beta * (A[t5%2][-t5+t6-1][-t5+t7][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7-1][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7][-t5+t8-1]
                                + A[t5%2][-t5+t6+1][-t5+t7][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7+1][-t5+t8]
                                + A[t5%2][-t5+t6][-t5+t7][-t5+t8+1]);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation)
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */

  return 0;
}
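The time-tiled loop nest above is machine-generated, so it is hard to check by eye. For reference, a minimal untiled sketch of the same order-1 7-point update follows, assuming the same double-buffered A[2][Nz][Ny][Nx] layout and one-cell halo; the name naive_3d7pt is illustrative and not part of the benchmark:

// Untiled reference for the 3D 7-point stencil: each time step writes the
// opposite buffer, reading the center point and its 6 face neighbors.
static void naive_3d7pt(double ****A, int Nz, int Ny, int Nx, int Nt,
                        double alpha, double beta)
{
    for (int t = 0; t < Nt - 1; t++) {
        double ***in  = A[t % 2];
        double ***out = A[(t + 1) % 2];
        for (int i = 1; i < Nz - 1; i++)
            for (int j = 1; j < Ny - 1; j++)
                for (int k = 1; k < Nx - 1; k++)
                    out[i][j][k] = alpha * in[i][j][k]
                                 + beta * (in[i-1][j][k] + in[i+1][j][k]
                                         + in[i][j-1][k] + in[i][j+1][k]
                                         + in[i][j][k-1] + in[i][j][k+1]);
    }
}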
omp_for.c
#include <stdio.h>
#include <omp.h>

int main(int argc, char** argv)
{
    // Initialize the shared sum once, before the parallel region, instead of
    // letting every thread race to write the same zero.
    int partial_Sum, total_Sum = 0;

    #pragma omp parallel private(partial_Sum) shared(total_Sum)
    {
        partial_Sum = 0;

        #pragma omp for
        for (int i = 1; i <= 10; i++) {
            partial_Sum += i;
        }

        // Create a thread-safe region.
        #pragma omp critical
        {
            // Add each thread's partial sum to the total sum.
            total_Sum += partial_Sum;
        }
    }

    printf("Total Sum: %d\n", total_Sum);
    return 0;
}
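The explicit partial-sum-plus-critical pattern is what this file demonstrates; for comparison, OpenMP's reduction clause expresses the same computation in a single directive. A minimal sketch, not part of the original example:

#include <stdio.h>

int main(void)
{
    int total_Sum = 0;
    // reduction(+:total_Sum) gives each thread a private copy of the sum
    // and combines the copies at the end of the loop, replacing both the
    // private partial sum and the critical section.
    #pragma omp parallel for reduction(+:total_Sum)
    for (int i = 1; i <= 10; i++) {
        total_Sum += i;
    }
    printf("Total Sum: %d\n", total_Sum);  // always 55
    return 0;
}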
GB_binop__plus_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__plus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__plus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_uint8)
// A*D function (colscale):         GB (_AxD__plus_uint8)
// D*A function (rowscale):         GB (_DxB__plus_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_uint8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_uint8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_uint8)
// C=scalar+B                       GB (_bind1st__plus_uint8)
// C=scalar+B'                      GB (_bind1st_tran__plus_uint8)
// C=A+scalar                       GB (_bind2nd__plus_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__plus_uint8)

// C type:    uint8_t
// A type:    uint8_t
// A pattern? 0
// B type:    uint8_t
// B pattern? 0

// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t  x  = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t  y  = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x + aij) ;                   \
}

GrB_Info GB (_bind1st_tran__plus_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij + y) ;                   \
}

GrB_Info GB (_bind2nd_tran__plus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
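Every kernel in this file instantiates the same scalar operation, GB_BINOP's z = (x + y) on uint8_t, which under C's unsigned arithmetic wraps modulo 256. A minimal standalone sketch of that scalar semantics, independent of the GraphBLAS templates and purely illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    // The PLUS_UINT8 operator: unsigned 8-bit addition, which C defines
    // to wrap modulo 256 rather than saturate or trap on overflow.
    uint8_t x = 200, y = 100;
    uint8_t z = (uint8_t) (x + y);   // (200 + 100) mod 256 = 44
    printf("%u + %u = %u (mod 256)\n", x, y, z);
    return 0;
}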
hci.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Alexander Sokolov <alexander.y.sokolov@gmail.com> * * Slater-Condon rule implementation for Heat-Bath CI */ #include <stdlib.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include "hci.h" #include <limits.h> // Computes C' = H * C in the selected CI basis void contract_h_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1) { int *ts = malloc(sizeof(int) * ndet); #pragma omp parallel { size_t ip, jp, p; int nset = (norb + 63) / 64; // Calculate excitation level for prescreening ts[0] = 0; uint64_t *str1a = strs; uint64_t *str1b = strs + nset; #pragma omp for schedule(static) for (ip = 1; ip < ndet; ++ip) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset)); } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { if (abs(ts[ip] - ts[jp]) < 3) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { ci1[ip] += hdiag[ip] * civec[ip]; } // Single excitation else if ((n_excit_a + n_excit_b) == 1) { int *ia; // alpha->alpha if (n_excit_b == 0) { ia = get_single_excitation(stria, strja, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, stria, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } // beta->beta else if (n_excit_a == 0) { ia = get_single_excitation(strib, strjb, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, strib, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 
1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } free(ia); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // alpha,alpha->alpha,alpha if (n_excit_b == 0) { int *ijab = get_double_excitation(stria, strja, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, stria, nset); sign *= compute_cre_des_sign(a, j, stria, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, stria, nset); sign *= compute_cre_des_sign(a, i, stria, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // beta,beta->beta,beta else if (n_excit_a == 0) { int *ijab = get_double_excitation(strib, strjb, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, strib, nset); sign *= compute_cre_des_sign(a, j, strib, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, strib, nset); sign *= compute_cre_des_sign(a, i, strib, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // alpha,beta->alpha,beta else { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j]; double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ia); free(jb); } } } // end if over ts } // end loop over jp } // end loop over ip } // end omp free(ts); } // Compare two strings and compute excitation level int n_excitations(uint64_t *str1, uint64_t *str2, int nset) { size_t p; int d = 0; for (p = 0; p < nset; ++p) { d += popcount(str1[p] ^ str2[p]); } return d / 2; } // Compute number of set bits in a string int popcount(uint64_t x) { const uint64_t m1 = 0x5555555555555555; //binary: 0101... const uint64_t m2 = 0x3333333333333333; //binary: 00110011.. const uint64_t m4 = 0x0f0f0f0f0f0f0f0f; //binary: 4 zeros, 4 ones ... const uint64_t m8 = 0x00ff00ff00ff00ff; //binary: 8 zeros, 8 ones ... const uint64_t m16 = 0x0000ffff0000ffff; //binary: 16 zeros, 16 ones ... 
const uint64_t m32 = 0x00000000ffffffff; //binary: 32 zeros, 32 ones x = (x & m1 ) + ((x >> 1) & m1 ); //put count of each 2 bits into those 2 bits x = (x & m2 ) + ((x >> 2) & m2 ); //put count of each 4 bits into those 4 bits x = (x & m4 ) + ((x >> 4) & m4 ); //put count of each 8 bits into those 8 bits x = (x & m8 ) + ((x >> 8) & m8 ); //put count of each 16 bits into those 16 bits x = (x & m16) + ((x >> 16) & m16); //put count of each 32 bits into those 32 bits x = (x & m32) + ((x >> 32) & m32); //put count of each 64 bits into those 64 bits return x; } // Compute orbital indices for a single excitation int *get_single_excitation(uint64_t *str1, uint64_t *str2, int nset) { size_t p; int *ia = malloc(sizeof(int) * 2); for (p = 0; p < nset; ++p) { size_t pp = nset - p - 1; uint64_t str_tmp = str1[pp] ^ str2[pp]; uint64_t str_particle = str_tmp & str2[pp]; uint64_t str_hole = str_tmp & str1[pp]; if (popcount(str_particle) == 1) { ia[1] = trailz(str_particle) + 64 * p; } if (popcount(str_hole) == 1) { ia[0] = trailz(str_hole) + 64 * p; } } return ia; } // Compute orbital indices for a double excitation int *get_double_excitation(uint64_t *str1, uint64_t *str2, int nset) { size_t p; int *ijab = malloc(sizeof(int) * 4); int particle_ind = 2; int hole_ind = 0; for (p = 0; p < nset; ++p) { size_t pp = nset - p - 1; uint64_t str_tmp = str1[pp] ^ str2[pp]; uint64_t str_particle = str_tmp & str2[pp]; uint64_t str_hole = str_tmp & str1[pp]; int n_particle = popcount(str_particle); int n_hole = popcount(str_hole); if (n_particle == 1) { ijab[particle_ind] = trailz(str_particle) + 64 * p; particle_ind++; } else if (n_particle == 2) { int a = trailz(str_particle); ijab[2] = a + 64 * p; str_particle &= ~(1ULL << a); int b = trailz(str_particle); ijab[3] = b + 64 * p; } if (n_hole == 1) { ijab[hole_ind] = trailz(str_hole) + 64 * p; hole_ind++; } else if (n_hole == 2) { int i = trailz(str_hole); ijab[0] = i + 64 * p; str_hole &= ~(1ULL << i); int j = trailz(str_hole); ijab[1] = j + 64 * p; } } return ijab; } // Compute number of trailing zeros in a bit string int trailz(uint64_t v) { int c = 64; // Trick to unset all bits but the first one v &= -(int64_t) v; if (v) c--; if (v & 0x00000000ffffffff) c -= 32; if (v & 0x0000ffff0000ffff) c -= 16; if (v & 0x00ff00ff00ff00ff) c -= 8; if (v & 0x0f0f0f0f0f0f0f0f) c -= 4; if (v & 0x3333333333333333) c -= 2; if (v & 0x5555555555555555) c -= 1; return c; } // Function to print int as a char for debug purposes char *int2bin(uint64_t i) { size_t bits = sizeof(uint64_t) * CHAR_BIT; char * str = malloc(bits + 1); if(!str) return NULL; str[bits] = 0; // type punning because signed shift is implementation-defined uint64_t u = *(uint64_t *)&i; for(; bits--; u >>= 1) str[bits] = u & 1 ? 
'1' : '0'; return str; } // Compute sign for a pair of creation and desctruction operators double compute_cre_des_sign(int p, int q, uint64_t *str, int nset) { double sign; int nperm; size_t i; int pg = p / 64; int qg = q / 64; int pb = p % 64; int qb = q % 64; if (pg > qg) { nperm = 0; for (i = nset-pg; i < nset-qg-1; ++i) { nperm += popcount(str[i]); } nperm += popcount(str[nset -1 - pg] & ((1ULL << pb) - 1)); nperm += str[nset -1 - qg] >> (qb + 1); } else if (pg < qg) { nperm = 0; for (i = nset-qg; i < nset-pg-1; ++i) { nperm += popcount(str[i]); } nperm += popcount(str[nset -1 - qg] & ((1ULL << qb) - 1)); nperm += str[nset -1 - pg] >> (pb + 1); } else { uint64_t mask; if (p > q) mask = (1ULL << pb) - (1ULL << (qb + 1)); else mask = (1ULL << qb) - (1ULL << (pb + 1)); nperm = popcount(str[nset -1 - pg] & mask); } if (nperm % 2) sign = -1.0; else sign = 1.0; return sign; } // Compute a list of occupied orbitals for a given string int *compute_occ_list(uint64_t *string, int nset, int norb, int nelec) { size_t k, i; int *occ = malloc(sizeof(int) * nelec); int off = 0; int occ_ind = 0; for (k = nset; k > 0; --k) { int i_max = ((norb - off) < 64 ? (norb - off) : 64); for (i = 0; i < i_max; ++i) { int i_occ = (string[k-1] >> i) & 1; if (i_occ) { occ[occ_ind] = i + off; occ_ind++; } } off += 64; } return occ; } // Compute a list of occupied orbitals for a given string int *compute_vir_list(uint64_t *string, int nset, int norb, int nelec) { size_t k, i; int *vir = malloc(sizeof(int) * (norb-nelec)); int off = 0; int vir_ind = 0; for (k = nset; k > 0; --k) { int i_max = ((norb - off) < 64 ? (norb - off) : 64); for (i = 0; i < i_max; ++i) { int i_occ = (string[k-1] >> i) & 1; if (!i_occ) { vir[vir_ind] = i + off; vir_ind++; } } off += 64; } return vir; } // Select determinants to include in the CI space void select_strs(double *h1, double *eri, double *jk, uint64_t *eri_sorted, uint64_t *jk_sorted, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet_start, uint64_t ndet_finish, double select_cutoff, uint64_t *strs_add, uint64_t* strs_add_size) { size_t p, q, r, i, k, a, ip, jp, kp, lp, ij, iset, idet; uint64_t max_strs_add = strs_add_size[0]; int nset = (norb + 63) / 64; // Compute Fock intermediates double *focka = malloc(sizeof(double) * norb * norb); double *fockb = malloc(sizeof(double) * norb * norb); for (p = 0; p < norb; ++p) { for (q = 0; q < norb; ++q) { double vja = 0.0; double vka = 0.0; for (i = 0; i < neleca; ++i) { size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q; size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q; vja += eri[iipq]; vka += eri[piiq]; } double vjb = 0.0; double vkb = 0.0; for (i = 0; i < nelecb; ++i) { size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q; size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q; vjb += eri[iipq]; vkb += eri[piiq]; } focka[p * norb + q] = h1[p * norb + q] + vja + vjb - vka; fockb[p * norb + q] = h1[p * norb + q] + vja + vjb - vkb; } } int *holes_a = malloc(sizeof(int) * norb); int *holes_b = malloc(sizeof(int) * norb); int *particles_a = malloc(sizeof(int) * norb); int *particles_b = malloc(sizeof(int) * norb); uint64_t strs_added = 0; // Loop over determinants for (idet = ndet_start; idet < ndet_finish; ++idet) { uint64_t *stra = strs + idet * 2 * nset; uint64_t *strb = strs + idet * 2 * nset + nset; int *occsa = compute_occ_list(stra, nset, norb, neleca); int *occsb = compute_occ_list(strb, nset, norb, nelecb); int *virsa = 
compute_vir_list(stra, nset, norb, neleca); int *virsb = compute_vir_list(strb, nset, norb, nelecb); double tol = select_cutoff / fabs(civec[idet]); // Single excitations int n_holes_a = 0; int n_holes_b = 0; int n_particles_a = 0; int n_particles_b = 0; for (p = 0; p < (norb - neleca); ++p) { i = virsa[p]; if (i < neleca) { holes_a[n_holes_a] = i; n_holes_a++; } } for (p = 0; p < neleca; ++p) { i = occsa[p]; if (i >= neleca) { particles_a[n_particles_a] = i; n_particles_a++; } } for (p = 0; p < (norb - nelecb); ++p) { i = virsb[p]; if (i < nelecb) { holes_b[n_holes_b] = i; n_holes_b++; } } for (p = 0; p < nelecb; ++p) { i = occsb[p]; if (i >= nelecb) { particles_b[n_particles_b] = i; n_particles_b++; } } // TODO: recompute Fock for each |Phi_I> and make sure it matches Fock in the code below // alpha->alpha for (p = 0; p < neleca; ++p) { i = occsa[p]; for (q = 0; q < (norb - neleca); ++q) { a = virsa[q]; double fai = focka[a * norb + i]; for (r = 0; r < n_particles_a; ++r) { k = particles_a[r]; fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_a; ++r) { k = holes_a[r]; fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_particles_b; ++r) { k = particles_b[r]; fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_b; ++r) { k = holes_b[r]; fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } if (fabs(fai) > tol) { uint64_t *tmp = toggle_bit(stra, nset, a); uint64_t *new_str = toggle_bit(tmp, nset, i); for (iset = 0; iset < nset; ++iset) { // new alpha string strs_add[strs_added * 2 * nset + iset] = new_str[iset]; // old beta string strs_add[strs_added * 2 * nset + nset + iset] = strb[iset]; } free(tmp); free(new_str); strs_added++; } } } // beta->beta for (p = 0; p < nelecb; ++p) { i = occsb[p]; for (q = 0; q < (norb - nelecb); ++q) { a = virsb[q]; double fai = fockb[a * norb + i]; for (r = 0; r < n_particles_b; ++r) { k = particles_b[r]; fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_b; ++r) { k = holes_b[r]; fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_particles_a; ++r) { k = particles_a[r]; fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_a; ++r) { k = holes_a[r]; fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } if (fabs(fai) > tol) { uint64_t *tmp = toggle_bit(strb, nset, a); uint64_t *new_str = toggle_bit(tmp, nset, i); for (iset = 0; iset < nset; ++iset) { // old alpha string strs_add[strs_added * 2 * nset + iset] = stra[iset]; // new beta string strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset]; } free(tmp); free(new_str); strs_added++; } } } size_t ip_occ, jp_occ, kp_occ, lp_occ, ih; // Double excitations for (p = 0; p < norb * norb * norb * norb; ++p) { ih = jk_sorted[p]; int aaaa_bbbb_done = (fabs(jk[ih]) < tol); if (!aaaa_bbbb_done) { lp = ih % norb; ij = ih / norb; kp = ij % norb; ij = ij / norb; jp = ij % norb; ip = ij / norb; // alpha,alpha->alpha,alpha ip_occ = 0; jp_occ = 0; kp_occ = 0; lp_occ = 0; for (r = 0; r < neleca; ++r) { int occ_index = occsa[r]; if (ip == occ_index) ip_occ = 1; if (jp == occ_index) jp_occ = 1; if (kp == occ_index) kp_occ = 1; if (lp == occ_index) lp_occ = 1; } if (jp_occ && lp_occ && !ip_occ && !kp_occ) { uint64_t *tmp = toggle_bit(stra, nset, jp); uint64_t *new_str = toggle_bit(tmp, nset, ip); tmp = toggle_bit(new_str, nset, lp); new_str = 
toggle_bit(tmp, nset, kp); for (iset = 0; iset < nset; ++iset) { strs_add[strs_added * 2 * nset + iset] = new_str[iset]; strs_add[strs_added * 2 * nset + nset + iset] = strb[iset]; } free(tmp); free(new_str); strs_added++; } // beta,beta->beta,beta ip_occ = 0; jp_occ = 0; kp_occ = 0; lp_occ = 0; for (r = 0; r < nelecb; ++r) { int occ_index = occsb[r]; if (ip == occ_index) ip_occ = 1; if (jp == occ_index) jp_occ = 1; if (kp == occ_index) kp_occ = 1; if (lp == occ_index) lp_occ = 1; } if (jp_occ && lp_occ && !ip_occ && !kp_occ) { uint64_t *tmp = toggle_bit(strb, nset, jp); uint64_t *new_str = toggle_bit(tmp, nset, ip); tmp = toggle_bit(new_str, nset, lp); new_str = toggle_bit(tmp, nset, kp); for (iset = 0; iset < nset; ++iset) { strs_add[strs_added * 2 * nset + iset] = stra[iset]; strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset]; } free(tmp); free(new_str); strs_added++; } } // alpha,beta->alpha,beta ih = eri_sorted[p]; int aabb_done = (fabs(eri[ih]) < tol); if (!aabb_done) { lp = ih % norb; ij = ih / norb; kp = ij % norb; ij = ij / norb; jp = ij % norb; ip = ij / norb; ip_occ = 0; jp_occ = 0; kp_occ = 0; lp_occ = 0; for (r = 0; r < neleca; ++r) { int occ_index = occsa[r]; if (ip == occ_index) ip_occ = 1; if (jp == occ_index) jp_occ = 1; } for (r = 0; r < nelecb; ++r) { int occ_index = occsb[r]; if (kp == occ_index) kp_occ = 1; if (lp == occ_index) lp_occ = 1; } if (jp_occ && lp_occ && !ip_occ && !kp_occ) { uint64_t *tmp = toggle_bit(stra, nset, jp); uint64_t *new_str_a = toggle_bit(tmp, nset, ip); tmp = toggle_bit(strb, nset, lp); uint64_t *new_str_b = toggle_bit(tmp, nset, kp); for (iset = 0; iset < nset; ++iset) { strs_add[strs_added * 2 * nset + iset] = new_str_a[iset]; strs_add[strs_added * 2 * nset + nset + iset] = new_str_b[iset]; } free(tmp); free(new_str_a); free(new_str_b); strs_added++; } } // Break statement if (aaaa_bbbb_done && aabb_done) { break; } } free(occsa); free(occsb); free(virsa); free(virsb); if (strs_added > max_strs_add) { printf("\nError: Number of selected strings is greater than the size of the buffer array (%ld vs %ld).\n", strs_added, max_strs_add); exit(EXIT_FAILURE); } } // end loop over determinants free(focka); free(fockb); free(holes_a); free(holes_b); free(particles_a); free(particles_b); strs_add_size[0] = strs_added; } // Toggle bit at a specified position uint64_t *toggle_bit(uint64_t *str, int nset, int p) { size_t i; uint64_t *new_str = malloc(sizeof(uint64_t) * nset); for (i = 0; i < nset; ++i) { new_str[i] = str[i]; } int p_set = p / 64; int p_rel = p % 64; new_str[nset - p_set - 1] ^= 1ULL << p_rel; return new_str; } // Compares two string indices and determines the order int order(uint64_t *strs_i, uint64_t *strs_j, int nset) { size_t i; for (i = 0; i < nset; ++i) { if (strs_i[i] > strs_j[i]) return 1; else if (strs_j[i] > strs_i[i]) return -1; } return 0; } // Recursive quick sort of string array indices void qsort_idx(uint64_t *strs, uint64_t *idx, uint64_t *nstrs_, int nset, uint64_t *new_idx) { size_t p; uint64_t nstrs = nstrs_[0]; if (nstrs <= 1) { for (p = 0; p < nstrs; ++p) new_idx[p] = idx[p]; } else { uint64_t ref = idx[nstrs - 1]; uint64_t *group_lt = malloc(sizeof(uint64_t) * nstrs); uint64_t *group_gt = malloc(sizeof(uint64_t) * nstrs); uint64_t group_lt_nstrs = 0; uint64_t group_gt_nstrs = 0; for (p = 0; p < (nstrs - 1); ++p) { uint64_t i = idx[p]; uint64_t *stri = strs + i * nset; uint64_t *strj = strs + ref * nset; int c = order(stri, strj, nset); if (c == -1) { group_lt[group_lt_nstrs] = i; group_lt_nstrs++; } else if 
(c == 1) { group_gt[group_gt_nstrs] = i; group_gt_nstrs++; } } uint64_t *new_idx_lt = malloc(sizeof(uint64_t) * group_lt_nstrs); uint64_t *new_idx_gt = malloc(sizeof(uint64_t) * group_gt_nstrs); qsort_idx(strs, group_lt, &group_lt_nstrs, nset, new_idx_lt); qsort_idx(strs, group_gt, &group_gt_nstrs, nset, new_idx_gt); nstrs = group_lt_nstrs + group_gt_nstrs + 1; nstrs_[0] = nstrs; for (p = 0; p < nstrs; ++p) { if (p < group_lt_nstrs) new_idx[p] = new_idx_lt[p]; else if (p == group_lt_nstrs) new_idx[p] = ref; else new_idx[p] = new_idx_gt[p - group_lt_nstrs - 1]; } free(new_idx_lt); free(new_idx_gt); free(group_lt); free(group_gt); } } // Helper function to perform recursive sort (nset is a total number of strings) void argunique(uint64_t *strs, uint64_t *sort_idx, uint64_t *nstrs_, int nset) { size_t p; uint64_t *init_idx = malloc(sizeof(uint64_t) * nstrs_[0]); for (p = 0; p < nstrs_[0]; ++p) init_idx[p] = p; qsort_idx(strs, init_idx, nstrs_, nset, sort_idx); free(init_idx); } // Computes C' = S2 * C in the selected CI basis void contract_ss_c(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *ci1) { int *ts = malloc(sizeof(int) * ndet); #pragma omp parallel { size_t ip, jp, p, q; int nset = (norb + 63) / 64; // Calculate excitation level for prescreening ts[0] = 0; uint64_t *str1a = strs; uint64_t *str1b = strs + nset; #pragma omp for schedule(static) for (ip = 1; ip < ndet; ++ip) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset)); } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { if (abs(ts[ip] - ts[jp]) < 3) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { double apb = (double) (neleca + nelecb); double amb = (double) (neleca - nelecb); double prefactor = apb / 2.0 + amb * amb / 4.0; int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); for (p = 0; p < neleca; ++p) { int pa = occsa[p]; for (q = 0; q < nelecb; ++q) { int qb = occsb[q]; if (pa == qb) prefactor -= 1.0; } } ci1[ip] += prefactor * civec[ip]; free(occsa); free(occsb); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // alpha,beta->alpha,beta if (n_excit_a == n_excit_b) { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; if (i == b && j == a) { double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); ci1[ip] -= sign * civec[jp]; } free(ia); free(jb); } } } // end if over ts } // end loop over jp } // end loop over ip } // end omp free(ts); } // Computes C' = H * C and C'' = S2 * C simultaneously in the selected CI basis void contract_h_c_ss_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1, double *ci2) { int *ts = malloc(sizeof(int) * ndet); #pragma omp parallel { size_t ip, jp, p, q; int nset = (norb + 63) / 64; // Calculate excitation level for prescreening ts[0] = 0; uint64_t *str1a = strs; uint64_t *str1b = strs + 
nset; #pragma omp for schedule(static) for (ip = 1; ip < ndet; ++ip) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset)); } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { if (abs(ts[ip] - ts[jp]) < 3) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { ci1[ip] += hdiag[ip] * civec[ip]; // S^2 double apb = (double) (neleca + nelecb); double amb = (double) (neleca - nelecb); double prefactor = apb / 2.0 + amb * amb / 4.0; int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); for (p = 0; p < neleca; ++p) { int pa = occsa[p]; for (q = 0; q < nelecb; ++q) { int qb = occsb[q]; if (pa == qb) prefactor -= 1.0; } } ci2[ip] += prefactor * civec[ip]; free(occsa); free(occsb); } // Single excitation else if ((n_excit_a + n_excit_b) == 1) { int *ia; // alpha->alpha if (n_excit_b == 0) { ia = get_single_excitation(stria, strja, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, stria, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } // beta->beta else if (n_excit_a == 0) { ia = get_single_excitation(strib, strjb, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, strib, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } free(ia); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // alpha,alpha->alpha,alpha if (n_excit_b == 0) { int *ijab = get_double_excitation(stria, strja, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, stria, nset); sign *= compute_cre_des_sign(a, j, stria, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, stria, nset); sign *= compute_cre_des_sign(a, i, stria, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // 
beta,beta->beta,beta else if (n_excit_a == 0) { int *ijab = get_double_excitation(strib, strjb, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, strib, nset); sign *= compute_cre_des_sign(a, j, strib, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, strib, nset); sign *= compute_cre_des_sign(a, i, strib, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // alpha,beta->alpha,beta else { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j]; double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; // S^2 if (i == b && j == a) { ci2[ip] -= sign * civec[jp]; } free(ia); free(jb); } } } // end if over ts } // end loop over jp } // end loop over ip } // end omp free(ts); } // 2-RDM is sorted in physicists notation: gamma_pqsr=<\Phi|a_p^dag a_q^dag a_r a_s|\Phi> void compute_rdm12s(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *rdm1a, double *rdm1b, double *rdm2aa, double *rdm2ab, double *rdm2bb) { #pragma omp parallel { size_t ip, jp, p, q, r, s; int nset = (norb + 63) / 64; double ci_sq = 0.0; double *rdm1a_private = malloc(sizeof(double) * norb * norb); double *rdm1b_private = malloc(sizeof(double) * norb * norb); double *rdm2aa_private = malloc(sizeof(double) * norb * norb * norb * norb); double *rdm2ab_private = malloc(sizeof(double) * norb * norb * norb * norb); double *rdm2bb_private = malloc(sizeof(double) * norb * norb * norb * norb); for (p = 0; p < norb * norb; ++p) { rdm1a_private[p] = 0.0; rdm1b_private[p] = 0.0; } for (p = 0; p < norb * norb * norb * norb; ++p) { rdm2aa_private[p] = 0.0; rdm2ab_private[p] = 0.0; rdm2bb_private[p] = 0.0; } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); ci_sq = civec[ip] * civec[ip]; // Diagonal rdm1_aa for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kk = k * norb + k; rdm1a_private[kk] += ci_sq; } // Diagonal rdm1_bb for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kk = k * norb + k; rdm1b_private[kk] += ci_sq; } // Diagonal rdm2_aaaa for (p = 0; p < neleca; ++p) { int k = occsa[p]; for (q = 0; q < neleca; ++q) { int j = occsa[q]; int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j; int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k; rdm2aa_private[kjkj] += ci_sq; rdm2aa_private[kjjk] -= ci_sq; } // Diagonal rdm2_abab for (q = 0; q < nelecb; ++q) { int j = occsb[q]; int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j; rdm2ab_private[kjkj] += ci_sq; } } // Diagonal rdm2_bbbb for (p = 0; p < 
nelecb; ++p) { int k = occsb[p]; for (q = 0; q < nelecb; ++q) { int j = occsb[q]; int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j; int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k; rdm2bb_private[kjkj] += ci_sq; rdm2bb_private[kjjk] -= ci_sq; } } free(occsa); free(occsb); } // Single excitation else if ((n_excit_a + n_excit_b) == 1) { int *ia; // alpha->alpha if (n_excit_b == 0) { ia = get_single_excitation(stria, strja, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, stria, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); ci_sq = sign * civec[ip] * civec[jp]; // rdm1_aa rdm1a_private[a * norb + i] += ci_sq; // rdm2_aaaa for (p = 0; p < neleca; ++p) { int k = occsa[p]; int akik = a * norb * norb * norb + k * norb * norb + i * norb + k; int akki = a * norb * norb * norb + k * norb * norb + k * norb + i; int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i; int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k; rdm2aa_private[akik] += ci_sq; rdm2aa_private[akki] -= ci_sq; rdm2aa_private[kaik] -= ci_sq; rdm2aa_private[kaki] += ci_sq; } // rdm2_abab for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int akik = a * norb * norb * norb + k * norb * norb + i * norb + k; rdm2ab_private[akik] += ci_sq; } free(occsa); free(occsb); } // beta->beta else if (n_excit_a == 0) { ia = get_single_excitation(strib, strjb, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, strib, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); ci_sq = sign * civec[ip] * civec[jp]; // rdm1_bb rdm1b_private[a * norb + i] += ci_sq; // rdm2_bbbb for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int akik = a * norb * norb * norb + k * norb * norb + i * norb + k; int akki = a * norb * norb * norb + k * norb * norb + k * norb + i; int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i; int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k; rdm2bb_private[akik] += ci_sq; rdm2bb_private[akki] -= ci_sq; rdm2bb_private[kaik] -= ci_sq; rdm2bb_private[kaki] += ci_sq; } // rdm2_abab for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i; rdm2ab_private[kaki] += ci_sq; } free(occsa); free(occsb); } free(ia); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // rdm2_aaaa if (n_excit_b == 0) { int *ijab = get_double_excitation(stria, strja, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double sign; int baij = b * norb * norb * norb + a * norb * norb + i * norb + j; int baji = b * norb * norb * norb + a * norb * norb + j * norb + i; int abij = a * norb * norb * norb + b * norb * norb + i * norb + j; int abji = a * norb * norb * norb + b * norb * norb + j * norb + i; if (a > j || i > b) { sign = compute_cre_des_sign(b, i, stria, nset); sign *= compute_cre_des_sign(a, j, stria, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2aa_private[baij] += ci_sq; rdm2aa_private[baji] -= ci_sq; rdm2aa_private[abij] -= ci_sq; rdm2aa_private[abji] += ci_sq; } else { sign = compute_cre_des_sign(b, j, stria, nset); sign *= compute_cre_des_sign(a, i, stria, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2aa_private[baij] -= ci_sq; rdm2aa_private[baji] += ci_sq; rdm2aa_private[abij] += ci_sq; rdm2aa_private[abji] -= ci_sq; } free(ijab); } // rdm2_bbbb else if (n_excit_a == 
0) { int *ijab = get_double_excitation(strib, strjb, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int baij = b * norb * norb * norb + a * norb * norb + i * norb + j; int baji = b * norb * norb * norb + a * norb * norb + j * norb + i; int abij = a * norb * norb * norb + b * norb * norb + i * norb + j; int abji = a * norb * norb * norb + b * norb * norb + j * norb + i; if (a > j || i > b) { sign = compute_cre_des_sign(b, i, strib, nset); sign *= compute_cre_des_sign(a, j, strib, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2bb_private[baij] += ci_sq; rdm2bb_private[baji] -= ci_sq; rdm2bb_private[abij] -= ci_sq; rdm2bb_private[abji] += ci_sq; } else { sign = compute_cre_des_sign(b, j, strib, nset); sign *= compute_cre_des_sign(a, i, strib, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2bb_private[baij] -= ci_sq; rdm2bb_private[baji] += ci_sq; rdm2bb_private[abij] += ci_sq; rdm2bb_private[abji] -= ci_sq; } free(ijab); } // rdm2_abab else { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); ci_sq = sign * civec[ip] * civec[jp]; int abij = a * norb * norb * norb + b * norb * norb + i * norb + j; rdm2ab_private[abij] += ci_sq; free(ia); free(jb); } } } // end loop over jp } // end loop over ip #pragma omp critical { for (p = 0; p < norb * norb; ++p) { rdm1a[p] += rdm1a_private[p]; rdm1b[p] += rdm1b_private[p]; } for (p = 0; p < norb * norb * norb * norb; ++p) { rdm2aa[p] += rdm2aa_private[p]; rdm2ab[p] += rdm2ab_private[p]; rdm2bb[p] += rdm2bb_private[p]; } } free(rdm1a_private); free(rdm1b_private); free(rdm2aa_private); free(rdm2ab_private); free(rdm2bb_private); } // end omp }
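The prescreening in hci.c rests on the identity used by n_excitations(): each excitation moves one electron and therefore toggles exactly two bits, so the excitation level between two determinants stored as bit strings is popcount(str1 ^ str2) / 2. A minimal self-contained illustration of that formula, using the GCC/Clang builtin __builtin_popcountll in place of the file's portable SWAR popcount (the determinant bit patterns are made up):

#include <stdio.h>
#include <stdint.h>

// Excitation level between two single-word determinants: half the
// Hamming distance of their occupation patterns.
static int n_excitations_demo(uint64_t str1, uint64_t str2)
{
    return __builtin_popcountll(str1 ^ str2) / 2;
}

int main(void)
{
    uint64_t hf   = 0x0F;  // orbitals 0-3 occupied (0b001111)
    uint64_t sng  = 0x17;  // orbital 3 -> 4        (0b010111)
    uint64_t dbl  = 0x33;  // orbitals 2,3 -> 4,5   (0b110011)
    printf("single: %d\n", n_excitations_demo(hf, sng)); // prints 1
    printf("double: %d\n", n_excitations_demo(hf, dbl)); // prints 2
    return 0;
}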
main.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy, memcmp */
#include <time.h>

#include "../examples/openmp_dot_product/scalar/runner.h"
#include "utils.h"

struct context {
    int m, n, P;
    int* a;
};

int* gen_rand_ints(int n) {
    int* a = malloc(n * sizeof(int));
    for (int i = 0; i < n; ++i) {
        a[i] = rand() % 100;
    }
    return a;
}

int cmp_int(const void* a, const void* b) {
    int x = *(const int*) a;
    int y = *(const int*) b;
    /* avoids the signed overflow of `x - y` on extreme values */
    return (x > y) - (x < y);
}

/* Lower bound: first position in a[l..r] holding a value >= val. */
int binsearch(int val, const int* a, int l, int r) {
    r = MAX(l, r + 1);
    while (l < r) {
        int mid = l + (r - l) / 2;
        if (val <= a[mid]) {
            r = mid;
        } else {
            l = mid + 1;
        }
    }
    return r;
}

void merge(int* a_start, int* a_end, int* b_start, int* b_end, int* dst) {
    while (a_start < a_end && b_start < b_end) {
        if (*a_start <= *b_start) {
            *dst++ = *a_start++;
        } else {
            *dst++ = *b_start++;
        }
    }
    while (a_start < a_end) {
        *dst++ = *a_start++;
    }
    while (b_start < b_end) {
        *dst++ = *b_start++;
    }
}

/* Merges the fragments [l1;r1] and [l2;r2] of array src, writing the
   result into dst starting at index l. */
void parmerge(int* src, int l1, int r1, int l2, int r2, int* dst, int l,
              int chunk_size) {
    int len1 = r1 - l1 + 1;
    int len2 = r2 - l2 + 1;
    if (len1 < len2) {
        swap(l1, l2);
        swap(r1, r2);
        swap(len1, len2);
    }
    if (len1 == 0) {
        return;
    }
    if (len1 + len2 <= chunk_size) {
        merge(&src[l1], &src[l1 + len1], &src[l2], &src[l2 + len2], &dst[l]);
    } else {
        /* split the longer fragment at its median, locate the median's
           rank in the shorter fragment, and merge the halves as tasks */
        int median = (l1 + r1) / 2;
        int target_median = binsearch(src[median], src, l2, r2);
        int size_of_first_half = l + (median - l1) + (target_median - l2);
        dst[size_of_first_half] = src[median];
        #pragma omp task
        parmerge(src, l1, median - 1, l2, target_median - 1, dst, l,
                 chunk_size);
        #pragma omp task
        parmerge(src, median + 1, r1, target_median, r2, dst,
                 size_of_first_half + 1, chunk_size);
        #pragma omp taskwait
    }
}

void parmergesort(int* src, int l, int r, int* dst, int src_to_dst,
                  int chunk_size) {
    if (r == l) {
        if (src_to_dst) {
            dst[l] = src[l];
        }
        return;
    }
    if (r - l <= chunk_size && !src_to_dst) {
        qsort(src + l, r - l + 1, sizeof(int), cmp_int);
        return;
    }
    int m = (r + l) / 2;
    #pragma omp task
    parmergesort(src, l, m, dst, !src_to_dst, chunk_size);
    #pragma omp task
    parmergesort(src, m + 1, r, dst, !src_to_dst, chunk_size);
    #pragma omp taskwait
    if (src_to_dst) {
        parmerge(src, l, m, m + 1, r, dst, l, chunk_size);
    } else {
        parmerge(dst, l, m, m + 1, r, src, l, chunk_size);
    }
}

/* Sorts a[0..n-1] in place with P threads; res is only a scratch
   buffer and is freed here, so nothing is returned. */
void parallel_merge_sort(int* a, int n, int m, int P) {
    int* res = malloc(n * sizeof(int));
    #pragma omp parallel num_threads(P)
    #pragma omp single nowait
    parmergesort(a, 0, n - 1, res, 0, m);
    free(res);
}

void parallel_merge_sort_run(void* ctx_void) {
    struct context* ctx = (struct context*) ctx_void;
    parallel_merge_sort(ctx->a, ctx->n, ctx->m, ctx->P);
}

void std_sort_run(void* ctx_void) {
    struct context* ctx = (struct context*) ctx_void;
    qsort(ctx->a, ctx->n, sizeof(int), cmp_int);
}

int main(int argc, char* argv[]) {
    if (argc < 4) {
        fprintf(stderr, "usage: %s <n> <chunk_size> <num_threads>\n", argv[0]);
        return 1;
    }
    srand(time(NULL));
    struct context ctx;
    ctx.n = atoi(argv[1]);
    ctx.m = atoi(argv[2]);
    ctx.P = atoi(argv[3]);
    int* a = gen_rand_ints(ctx.n);
    FILE* outf_data = fopen("data.txt", "w");
    for (int i = 0; i < ctx.n; ++i) {
        fprintf(outf_data, "%d ", a[i]);
    }
    fprintf(outf_data, "\n");
    int* a_copy = malloc(ctx.n * sizeof(int));
    memcpy(a_copy, a, ctx.n * sizeof(int));
    IF_DBG({
        for (int i = 0; i < ctx.n; ++i) {
            printf("%d %d; ", a[i], a_copy[i]);
        }
        printf("\n");
    });
    double time_parallel = 0., time_unparallel = 0.;
    ctx.a = a;
    if (ctx.P != 0) {
        time_parallel = runner_run(parallel_merge_sort_run, &ctx,
                                   "parallel sort");
    }
    IF_DBG({
        for (int i = 0; i < ctx.n; ++i) {
            printf("%d %d; ", a[i], a_copy[i]);
        }
        printf("\n");
    });
    for (int i = 0; i < ctx.n; ++i) {
        fprintf(outf_data, "%d ", a[i]);
    }
    fprintf(outf_data, "\n");
    fclose(outf_data);
    ctx.a = a_copy;
    time_unparallel = runner_run(std_sort_run, &ctx, "not parallel sort");
    IF_DBG({
        for (int i = 0; i < ctx.n; ++i) {
            printf("%d %d; ", a[i], a_copy[i]);
        }
        printf("\n");
    });
    /* the reference qsort always runs; compare only if the parallel
       sort ran too, otherwise `a` is still unsorted */
    if (ctx.P != 0 && memcmp(a, a_copy, ctx.n * sizeof(int))) {
        printf("Incorrect sorting\n");
    }
    double elapsed = ctx.P != 0 ? time_parallel : time_unparallel;
    FILE* outf_stats = fopen("stats.txt", "a+");
    fprintf(outf_stats, "%fs %d %d %d\n", elapsed, ctx.n, ctx.m, ctx.P);
    fclose(outf_stats);
    free(a);
    free(a_copy);
    return 0;
}
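/* Addendum (illustration only): the task pattern used above -- one
   parallel region, a single seeding thread, recursive #pragma omp task
   with a sequential cutoff -- reduced to a minimal self-contained toy.
   This is a plain top-down mergesort, not the median-split parmerge of
   this file, and the guard macro and names (MERGESORT_TASK_DEMO,
   msort_demo) are made up for the sketch. Intended to be extracted and
   built as its own file: gcc -fopenmp -DMERGESORT_TASK_DEMO demo.c */
#ifdef MERGESORT_TASK_DEMO
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void msort_demo(int* a, int* tmp, int l, int r) {
    if (r <= l) return;
    int m = (l + r) / 2;
    /* below the cutoff the if() clause makes the task undeferred,
       i.e. it runs immediately in the creating thread */
    #pragma omp task shared(a, tmp) if (r - l > 1024)
    msort_demo(a, tmp, l, m);
    #pragma omp task shared(a, tmp) if (r - l > 1024)
    msort_demo(a, tmp, m + 1, r);
    #pragma omp taskwait
    /* sequential two-way merge into tmp, then copy back */
    int i = l, j = m + 1, k = l;
    while (i <= m && j <= r) tmp[k++] = (a[i] <= a[j]) ? a[i++] : a[j++];
    while (i <= m) tmp[k++] = a[i++];
    while (j <= r) tmp[k++] = a[j++];
    memcpy(a + l, tmp + l, (size_t)(r - l + 1) * sizeof(int));
}

int main(void) {
    int n = 1 << 20;
    int* a = malloc(n * sizeof(int));
    int* tmp = malloc(n * sizeof(int));
    for (int i = 0; i < n; ++i) a[i] = rand();
    #pragma omp parallel
    #pragma omp single nowait
    msort_demo(a, tmp, 0, n - 1); /* one thread seeds the task tree */
    for (int i = 1; i < n; ++i) {
        if (a[i - 1] > a[i]) {
            printf("not sorted\n");
            return 1;
        }
    }
    printf("sorted %d ints\n", n);
    free(a);
    free(tmp);
    return 0;
}
#endif /* MERGESORT_TASK_DEMO */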
transfer.c
/** @file transfer.c Documented transfer module.
 *
 * Julien Lesgourgues, 28.07.2013
 *
 * This module has two purposes:
 *
 * - at the beginning, to compute the transfer functions \f$
 *   \Delta_l^{X} (q) \f$, and store them in tables used for
 *   interpolation in other modules.
 *
 * - at any time in the code, to evaluate the transfer functions (for
 *   a given mode, initial condition, type and multipole l) at any
 *   wavenumber q (by interpolating within the interpolation table).
 *
 * Hence the following functions can be called from other modules:
 *
 * -# transfer_init() at the beginning (but after perturb_init()
 *    and bessel_init())
 *
 * -# transfer_functions_at_q() at any later time
 *
 * -# transfer_free() at the end, when no more calls to
 *    transfer_functions_at_q() are needed
 *
 * Note that in the standard implementation of CLASS, only the pre-computed
 * values of the transfer functions are used, no interpolation is necessary;
 * hence the routine transfer_functions_at_q() is actually never called.
 */

#include "transfer.h"

/**
 * Transfer function \f$ \Delta_l^{X} (q) \f$ at a given wavenumber q.
 *
 * For a given mode (scalar, vector, tensor), initial condition, type
 * (temperature, polarization, lensing, etc.) and multipole, computes
 * the transfer function for an arbitrary value of q by interpolating
 * between pre-computed values of q. This
 * function can be called from whatever module at whatever time,
 * provided that transfer_init() has been called before, and
 * transfer_free() has not been called yet.
 *
 * Wavenumbers are called q in this module and k in the perturbation
 * module. In flat universes k=q. In non-flat universes q and k differ
 * through \f$ q^2 = k^2 + K(1+m) \f$, where m=0,1,2 for scalar, vector,
 * tensor. q should be used throughout the transfer module, except
 * when interpolating or manipulating the source functions S(k,tau)
 * calculated in the perturbation module: for a given value of q, this
 * should be done at the corresponding k(q).
 *
 * @param ptr               Input: pointer to transfers structure
 * @param index_md          Input: index of requested mode
 * @param index_ic          Input: index of requested initial condition
 * @param index_tt          Input: index of requested type
 * @param index_l           Input: index of requested multipole
 * @param q                 Input: any wavenumber
 * @param transfer_function Output: transfer function
 * @return the error status
 */

int transfer_functions_at_q(
                            struct transfers * ptr,
                            int index_md,
                            int index_ic,
                            int index_tt,
                            int index_l,
                            double q,
                            double * transfer_function
                            ) {

  /** Summary: */

  /** - interpolate in pre-computed table using array_interpolate_two() */

  class_call(array_interpolate_two(
                                   ptr->q,
                                   1,
                                   0,
                                   ptr->transfer[index_md]
                                   +((index_ic * ptr->tt_size[index_md] + index_tt)
                                     * ptr->l_size[index_md] + index_l)
                                   * ptr->q_size,
                                   1,
                                   ptr->q_size,
                                   q,
                                   transfer_function,
                                   1,
                                   ptr->error_message),
             ptr->error_message,
             ptr->error_message);

  return _SUCCESS_;
}

/**
 * This routine initializes the transfers structure (in particular,
 * computes the table of transfer functions \f$ \Delta_l^{X} (q) \f$).
 *
 * Main steps:
 *
 * - initialize all indices in the transfers structure
 *   and allocate all its arrays using transfer_indices_of_transfers().
 *
 * - for each thread (in case of parallel run), initialize the fields of a memory zone called the transfer_workspace with transfer_workspace_init()
 *
 * - loop over q values.
 *   For each q, compute the Bessel functions if needed with
 *   transfer_update_HIS(), and defer the calculation of all transfer
 *   functions to transfer_compute_for_each_q()
 *
 * - for each thread, free the workspace with transfer_workspace_free()
 *
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
 * @param pth Input : pointer to thermodynamics structure
 * @param ppt Input : pointer to perturbation structure
 * @param pnl Input : pointer to nonlinear structure
 * @param ptr Output: pointer to initialized transfers structure
 * @return the error status
 */

int transfer_init(
                  struct precision * ppr,
                  struct background * pba,
                  struct thermo * pth,
                  struct perturbs * ppt,
                  struct nonlinear * pnl,
                  struct transfers * ptr
                  ) {

  /** Summary: */

  /** - define local variables */

  /* running index for wavenumbers */
  int index_q;

  /* conformal time today */
  double tau0;

  /* conformal time at recombination */
  double tau_rec;

  /* order of magnitude of the oscillation period of transfer functions */
  double q_period;

  /* maximum number of sampling times for transfer sources */
  int tau_size_max;

  /* array of sources S(k,tau), just taken from the perturbation module,
     or transformed if non-linear corrections are needed,
     sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp]
            [index_tau * ppt->k_size[index_md] + index_k] */
  double *** sources;

  /* array of source derivatives S''(k,tau) (second derivative with
     respect to k, not tau!), used to interpolate sources at the right
     values of k,
     sources_spline[index_md][index_ic * ppt->tp_size[index_md] + index_tp]
                   [index_tau * ppt->k_size[index_md] + index_k] */
  double *** sources_spline;

  /* pointer to workspace (one per thread if openmp) */
  struct transfer_workspace * ptw;

  /** - array with the correspondence between the index of sources in
      the perturbation module and in the transfer module,
      tp_of_tt[index_md][index_tt] */
  int ** tp_of_tt;

  /* structure containing the flat spherical bessel functions */
  HyperInterpStruct BIS;
  double xmax;

  /* This code can be optionally compiled with the openmp option for
     parallel computation. Inside parallel regions, the use of the
     command "return" is forbidden. For error management, instead of
     "return _FAILURE_", we will set the variable below to
     "abort = _TRUE_". This will lead to a "return _FAILURE_" just
     after leaving the parallel region. */
  int abort;

#ifdef _OPENMP
  /* instrumentation times */
  double tstart, tstop, tspent;
#endif

  /** - check whether any spectrum in harmonic space (i.e., any C_l's)
      is actually requested */

  if (ppt->has_cls == _FALSE_) {
    ptr->has_cls = _FALSE_;
    if (ptr->transfer_verbose > 0)
      printf("No harmonic space transfer functions to compute. Transfer module skipped.\n");
    return _SUCCESS_;
  }
  else
    ptr->has_cls = _TRUE_;

  if (ptr->transfer_verbose > 0)
    fprintf(stdout,"Computing transfers\n");

  /** - get number of modes (scalars, tensors...) */

  ptr->md_size = ppt->md_size;

  /** - get conformal age / recombination time from background /
      thermodynamics structures (only place where these structures
      are used in this module) */

  tau0 = pba->conformal_age;
  tau_rec = pth->tau_rec;

  /** - the correspondence between k and l depends on the angular
      diameter distance, i.e. on curvature.
*/ ptr->angular_rescaling = pth->angular_rescaling; /** order of magnitude of the oscillation period of transfer functions */ q_period = 2.*_PI_/(tau0-tau_rec)*ptr->angular_rescaling; /** - initialize all indices in the transfers structure and allocate all its arrays using transfer_indices_of_transfers() */ class_call(transfer_indices_of_transfers(ppr,ppt,ptr,q_period,pba->K,pba->sgnK), ptr->error_message, ptr->error_message); /** - copy sources to a local array sources (in fact, only the pointers are copied, not the data), and eventually apply non-linear corrections to the sources */ class_alloc(sources, ptr->md_size*sizeof(double**), ptr->error_message); class_call(transfer_perturbation_copy_sources_and_nl_corrections(ppt,pnl,ptr,sources), ptr->error_message, ptr->error_message); /** - spline all the sources passed by the perturbation module with respect to k (in order to interpolate later at a given value of k) */ class_alloc(sources_spline, ptr->md_size*sizeof(double**), ptr->error_message); class_call(transfer_perturbation_source_spline(ppt,ptr,sources,sources_spline), ptr->error_message, ptr->error_message); /** - allocate and fill array describing the correspondence between perturbation types and transfer types */ class_alloc(tp_of_tt, ptr->md_size*sizeof(int*), ptr->error_message); class_call(transfer_get_source_correspondence(ppt,ptr,tp_of_tt), ptr->error_message, ptr->error_message); /** - evaluate maximum number of sampled times in the transfer sources: needs to be known here, in order to allocate a large enough workspace */ class_call(transfer_source_tau_size_max(ppr,pba,ppt,ptr,tau_rec,tau0,&tau_size_max), ptr->error_message, ptr->error_message); /** - compute flat spherical bessel functions */ xmax = ptr->q[ptr->q_size-1]*tau0; if (pba->sgnK == -1) xmax *= (ptr->l[ptr->l_size_max-1]/ppr->hyper_flat_approximation_nu)/asinh(ptr->l[ptr->l_size_max-1]/ppr->hyper_flat_approximation_nu)*1.01; class_call(hyperspherical_HIS_create(0, 1., ptr->l_size_max, ptr->l, ppr->hyper_x_min, xmax, ppr->hyper_sampling_flat, ptr->l[ptr->l_size_max-1]+1, ppr->hyper_phi_min_abs, &BIS, ptr->error_message), ptr->error_message, ptr->error_message); /* fprintf(stderr,"tau:%d l:%d q:%d\n", ppt->tau_size, ptr->l_size_max, ptr->q_size ); */ /** - eventually read the selection and evolution functions */ class_call(transfer_global_selection_read(ptr), ptr->error_message, ptr->error_message); /** (a.3.) workspace, allocated in a parallel zone since in openmp version there is one workspace per thread */ /* initialize error management flag */ abort = _FALSE_; /* beginning of parallel region */ #pragma omp parallel \ shared(tau_size_max,ptr,ppr,pba,ppt,tp_of_tt,tau_rec,sources_spline,abort,BIS,tau0) \ private(ptw,index_q,tstart,tstop,tspent) { #ifdef _OPENMP tspent = 0.; #endif /* allocate workspace */ class_call_parallel(transfer_workspace_init(ptr, ppr, &ptw, ppt->tau_size, tau_size_max, pba->K, pba->sgnK, tau0-pth->tau_cut, &BIS), ptr->error_message, ptr->error_message); /** - loop over all wavenumbers (parallelised). 
For each wavenumber: */ #pragma omp for schedule (dynamic) for (index_q = 0; index_q < ptr->q_size; index_q++) { #ifdef _OPENMP tstart = omp_get_wtime(); #endif if (ptr->transfer_verbose > 2) printf("Compute transfer for wavenumber [%d/%zu]\n",index_q,ptr->q_size-1); /* Update interpolation structure: */ class_call_parallel(transfer_update_HIS(ppr, ptr, ptw, index_q, tau0), ptr->error_message, ptr->error_message); class_call_parallel(transfer_compute_for_each_q(ppr, pba, ppt, ptr, tp_of_tt, index_q, tau_size_max, tau_rec, sources, sources_spline, ptw), ptr->error_message, ptr->error_message); #ifdef _OPENMP tstop = omp_get_wtime(); tspent += tstop-tstart; #endif #pragma omp flush(abort) } /* end of loop over wavenumber */ /* free workspace allocated inside parallel zone */ class_call_parallel(transfer_workspace_free(ptr,ptw), ptr->error_message, ptr->error_message); #ifdef _OPENMP if (ptr->transfer_verbose>1) printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n", __func__,tspent,omp_get_thread_num()); #endif } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; /* finally, free arrays allocated outside parallel zone */ class_call(transfer_perturbation_sources_spline_free(ppt,ptr,sources_spline), ptr->error_message, ptr->error_message); class_call(transfer_perturbation_sources_free(ppt,pnl,ptr,sources), ptr->error_message, ptr->error_message); class_call(transfer_free_source_correspondence(ptr,tp_of_tt), ptr->error_message, ptr->error_message); class_call(hyperspherical_HIS_free(&BIS,ptr->error_message), ptr->error_message, ptr->error_message); return _SUCCESS_; } /** * This routine frees all the memory space allocated by transfer_init(). * * To be called at the end of each run, only when no further calls to * transfer_functions_at_k() are needed. * * @param ptr Input: pointer to transfers structure (which fields must be freed) * @return the error status */ int transfer_free( struct transfers * ptr ) { int index_md; if (ptr->has_cls == _TRUE_) { for (index_md = 0; index_md < ptr->md_size; index_md++) { free(ptr->l_size_tt[index_md]); free(ptr->transfer[index_md]); free(ptr->k[index_md]); } free(ptr->tt_size); free(ptr->l_size_tt); free(ptr->l_size); free(ptr->l); free(ptr->q); free(ptr->k); free(ptr->transfer); if (ptr->nz_size > 0) { free(ptr->nz_z); free(ptr->nz_nz); free(ptr->nz_ddnz); } if (ptr->nz_evo_size > 0) { free(ptr->nz_evo_z); free(ptr->nz_evo_nz); free(ptr->nz_evo_dlog_nz); free(ptr->nz_evo_dd_dlog_nz); } } return _SUCCESS_; } /** * This routine defines all indices and allocates all tables * in the transfers structure * * Compute list of (k, l) values, allocate and fill corresponding * arrays in the transfers structure. Allocate the array of transfer * function tables. 
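 *
 * (Illustration, added for orientation: the transfer table allocated
 * here is flattened, and a given element is later read back, e.g. in
 * transfer_functions_at_q(), as
 *   transfer[index_md][((index_ic * tt_size[index_md] + index_tt)
 *                        * l_size[index_md] + index_l)
 *                        * q_size + index_q]
 * so the q index runs fastest, then l, then type, then initial
 * condition.)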
* * @param ppr Input : pointer to precision structure * @param ppt Input : pointer to perturbation structure * @param ptr Input/Output: pointer to transfer structure * @param rs_rec Input : comoving distance to recombination * @return the error status */ int transfer_indices_of_transfers( struct precision * ppr, struct perturbs * ppt, struct transfers * ptr, double q_period, double K, int sgnK ) { /** Summary: */ /** - define local variables */ int index_md,index_tt,index_tt_common; /** define indices for transfer types */ class_alloc(ptr->tt_size,ptr->md_size * sizeof(int),ptr->error_message); /** - type indices common to scalars and tensors */ index_tt = 0; class_define_index(ptr->index_tt_t2,ppt->has_cl_cmb_temperature, index_tt,1); class_define_index(ptr->index_tt_e, ppt->has_cl_cmb_polarization,index_tt,1); index_tt_common=index_tt; /** - type indices for scalars */ if (ppt->has_scalars == _TRUE_) { index_tt = index_tt_common; class_define_index(ptr->index_tt_t0, ppt->has_cl_cmb_temperature, index_tt,1); class_define_index(ptr->index_tt_t1, ppt->has_cl_cmb_temperature, index_tt,1); class_define_index(ptr->index_tt_lcmb, ppt->has_cl_cmb_lensing_potential,index_tt,1); class_define_index(ptr->index_tt_density,ppt->has_nc_density, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_rsd, ppt->has_nc_rsd, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_d0, ppt->has_nc_rsd, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_d1, ppt->has_nc_rsd, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_nc_lens,ppt->has_nc_lens, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_nc_g1, ppt->has_nc_gr, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_nc_g2, ppt->has_nc_gr, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_nc_g3, ppt->has_nc_gr, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_nc_g4, ppt->has_nc_gr, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_nc_g5, ppt->has_nc_gr, index_tt,ppt->selection_num); class_define_index(ptr->index_tt_lensing,ppt->has_cl_lensing_potential, index_tt,ppt->selection_num); ptr->tt_size[ppt->index_md_scalars]=index_tt; } /** - type indices for vectors */ if (ppt->has_vectors == _TRUE_) { index_tt = index_tt_common; class_define_index(ptr->index_tt_t1,ppt->has_cl_cmb_temperature, index_tt,1); class_define_index(ptr->index_tt_b, ppt->has_cl_cmb_polarization,index_tt,1); ptr->tt_size[ppt->index_md_vectors]=index_tt; } /** - type indices for tensors */ if (ppt->has_tensors == _TRUE_) { index_tt = index_tt_common; class_define_index(ptr->index_tt_b, ppt->has_cl_cmb_polarization,index_tt,1); ptr->tt_size[ppt->index_md_tensors]=index_tt; } /** - allocate arrays of (k, l) values and transfer functions */ /* number of l values for each mode and type, l_size_tt[index_md][index_tt], and maximized for each mode, l_size[index_md] */ class_alloc(ptr->l_size,ptr->md_size * sizeof(int),ptr->error_message); class_alloc(ptr->l_size_tt,ptr->md_size * sizeof(int *),ptr->error_message); for (index_md = 0; index_md < ptr->md_size; index_md++) { class_alloc(ptr->l_size_tt[index_md],ptr->tt_size[index_md] * sizeof(int),ptr->error_message); } /* array (of array) of transfer functions for each mode, transfer[index_md] */ class_alloc(ptr->transfer,ptr->md_size * sizeof(double *),ptr->error_message); /** get q values using transfer_get_q_list() */ class_call(transfer_get_q_list(ppr,ppt,ptr,q_period,K,sgnK), ptr->error_message, ptr->error_message); /** get k values using 
transfer_get_k_list() */ class_call(transfer_get_k_list(ppt,ptr,K), ptr->error_message, ptr->error_message); /* for testing, it can be useful to print the q list in a file: */ /* FILE * out=fopen("output/q","w"); int index_q; for (index_q=0; index_q < ptr->q_size; index_q++) { fprintf(out,"%d %e %e %e %e\n", index_q, ptr->q[index_q], ptr->k[0][index_q], ptr->q[index_q]/sqrt(sgnK*K), ptr->q[index_q+1]-ptr->q[index_q]); } fclose(out); */ /** get l values using transfer_get_l_list() */ class_call(transfer_get_l_list(ppr,ppt,ptr), ptr->error_message, ptr->error_message); /** - loop over modes (scalar, etc). For each mode: */ for (index_md = 0; index_md < ptr->md_size; index_md++) { /** allocate arrays of transfer functions, (ptr->transfer[index_md])[index_ic][index_tt][index_l][index_k] */ class_alloc(ptr->transfer[index_md], ppt->ic_size[index_md] * ptr->tt_size[index_md] * ptr->l_size[index_md] * ptr->q_size * sizeof(double), ptr->error_message); } return _SUCCESS_; } int transfer_perturbation_copy_sources_and_nl_corrections( struct perturbs * ppt, struct nonlinear * pnl, struct transfers * ptr, double *** sources ) { int index_md; int index_ic; int index_tp; int index_k; int index_tau; for (index_md = 0; index_md < ptr->md_size; index_md++) { class_alloc(sources[index_md], ppt->ic_size[index_md]*ppt->tp_size[index_md]*sizeof(double*), ptr->error_message); for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) { if ((pnl->method != nl_none) && (_scalars_) && (((ppt->has_source_delta_m == _TRUE_) && (index_tp == ppt->index_tp_delta_m)) || ((ppt->has_source_theta_m == _TRUE_) && (index_tp == ppt->index_tp_theta_m)) || ((ppt->has_source_phi == _TRUE_) && (index_tp == ppt->index_tp_phi)) || ((ppt->has_source_phi_prime == _TRUE_) && (index_tp == ppt->index_tp_phi_prime)) || ((ppt->has_source_phi_plus_psi == _TRUE_) && (index_tp == ppt->index_tp_phi_plus_psi)) || ((ppt->has_source_psi == _TRUE_) && (index_tp == ppt->index_tp_psi)))) { class_alloc(sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp], ppt->k_size[index_md]*ppt->tau_size*sizeof(double), ptr->error_message); for (index_tau=0; index_tau<ppt->tau_size; index_tau++) { for (index_k=0; index_k<ppt->k_size[index_md]; index_k++) { sources[index_md] [index_ic * ppt->tp_size[index_md] + index_tp] [index_tau * ppt->k_size[index_md] + index_k] = ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + index_tp] [index_tau * ppt->k_size[index_md] + index_k] * pnl->nl_corr_density[index_tau * ppt->k_size[index_md] + index_k]; } } } else { sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp] = ppt->sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp]; } } } } return _SUCCESS_; } int transfer_perturbation_source_spline( struct perturbs * ppt, struct transfers * ptr, double *** sources, double *** sources_spline ) { int index_md; int index_ic; int index_tp; for (index_md = 0; index_md < ptr->md_size; index_md++) { class_alloc(sources_spline[index_md], ppt->ic_size[index_md]*ppt->tp_size[index_md]*sizeof(double*), ptr->error_message); for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) { class_alloc(sources_spline[index_md][index_ic * ppt->tp_size[index_md] + index_tp], ppt->k_size[index_md]*ppt->tau_size*sizeof(double), ptr->error_message); class_call(array_spline_table_columns2(ppt->k[index_md], ppt->k_size[index_md], sources[index_md][index_ic * 
ppt->tp_size[index_md] + index_tp], ppt->tau_size, sources_spline[index_md][index_ic * ppt->tp_size[index_md] + index_tp], _SPLINE_EST_DERIV_, ptr->error_message), ptr->error_message, ptr->error_message); } } } return _SUCCESS_; } int transfer_perturbation_sources_free( struct perturbs * ppt, struct nonlinear * pnl, struct transfers * ptr, double *** sources ) { int index_md; int index_ic; int index_tp; for (index_md = 0; index_md < ptr->md_size; index_md++) { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) { if ((pnl->method != nl_none) && (_scalars_) && (((ppt->has_source_delta_m == _TRUE_) && (index_tp == ppt->index_tp_delta_m)) || ((ppt->has_source_theta_m == _TRUE_) && (index_tp == ppt->index_tp_theta_m)) || ((ppt->has_source_phi == _TRUE_) && (index_tp == ppt->index_tp_phi)) || ((ppt->has_source_phi_prime == _TRUE_) && (index_tp == ppt->index_tp_phi_prime)) || ((ppt->has_source_phi_plus_psi == _TRUE_) && (index_tp == ppt->index_tp_phi_plus_psi)) || ((ppt->has_source_psi == _TRUE_) && (index_tp == ppt->index_tp_psi)))) { free(sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp]); } } } free(sources[index_md]); } free(sources); return _SUCCESS_; } int transfer_perturbation_sources_spline_free( struct perturbs * ppt, struct transfers * ptr, double *** sources_spline ) { int index_md; int index_ic; int index_tp; for (index_md = 0; index_md < ptr->md_size; index_md++) { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) { free(sources_spline[index_md][index_ic * ppt->tp_size[index_md] + index_tp]); } } free(sources_spline[index_md]); } free(sources_spline); return _SUCCESS_; } /** * This routine defines the number and values of mutipoles l for all modes. 
* * @param ppr Input : pointer to precision structure * @param ppt Input : pointer to perturbation structure * @param ptr Input/Output : pointer to transfers structure containing l's * @return the error status */ int transfer_get_l_list( struct precision * ppr, struct perturbs * ppt, struct transfers * ptr ) { int index_l; int l_max=0; int index_md; int index_tt; int increment,current_l; /* fprintf(stderr,"rescaling %e logstep %e linstep %e\n", ptr->angular_rescaling, pow(ppr->l_logstep,ptr->angular_rescaling), ppr->l_linstep*ptr->angular_rescaling); */ /* check that largests need value of l_max */ if (ppt->has_cls == _TRUE_) { if (ppt->has_scalars == _TRUE_) { if ((ppt->has_cl_cmb_temperature == _TRUE_) || (ppt->has_cl_cmb_polarization == _TRUE_) || (ppt->has_cl_cmb_lensing_potential == _TRUE_)) l_max=MAX(ppt->l_scalar_max,l_max); if ((ppt->has_cl_lensing_potential == _TRUE_) || (ppt->has_cl_number_count == _TRUE_)) l_max=MAX(ppt->l_lss_max,l_max); } if (ppt->has_tensors == _TRUE_) l_max=MAX(ppt->l_tensor_max,l_max); } /* allocate and fill l array */ /** - start from l = 2 and increase with logarithmic step */ index_l = 0; current_l = 2; increment = MAX((int)(current_l * (pow(ppr->l_logstep,ptr->angular_rescaling)-1.)),1); while (((current_l+increment) < l_max) && (increment < ppr->l_linstep*ptr->angular_rescaling)) { index_l ++; current_l += increment; increment = MAX((int)(current_l * (pow(ppr->l_logstep,ptr->angular_rescaling)-1.)),1); } /** - when the logarithmic step becomes larger than some linear step, stick to this linear step till l_max */ increment = ppr->l_linstep*ptr->angular_rescaling; while ((current_l+increment) <= l_max) { index_l ++; current_l += increment; } /** - last value set to exactly l_max */ if (current_l != l_max) { index_l ++; current_l = l_max; } ptr->l_size_max = index_l+1; /** - so far we just counted the number of values. Now repeat the whole thing but fill array with values. */ class_alloc(ptr->l,ptr->l_size_max*sizeof(int),ptr->error_message); index_l = 0; ptr->l[0] = 2; increment = MAX((int)(ptr->l[0] * (pow(ppr->l_logstep,ptr->angular_rescaling)-1.)),1); while (((ptr->l[index_l]+increment) < l_max) && (increment < ppr->l_linstep*ptr->angular_rescaling)) { index_l ++; ptr->l[index_l]=ptr->l[index_l-1]+increment; increment = MAX((int)(ptr->l[index_l] * (pow(ppr->l_logstep,ptr->angular_rescaling)-1.)),1); } increment = ppr->l_linstep*ptr->angular_rescaling; while ((ptr->l[index_l]+increment) <= l_max) { index_l ++; ptr->l[index_l]=ptr->l[index_l-1]+increment; } if (ptr->l[index_l] != l_max) { index_l ++; ptr->l[index_l]= l_max; } /* for each mode and type, find relevant size of l array, l_size_tt[index_md][index_tt] (since for some modes and types l_max can be smaller). Also, maximize this size for each mode to find l_size[index_md]. 
*/ for (index_md=0; index_md < ppt->md_size; index_md++) { ptr->l_size[index_md] = 0; for (index_tt=0;index_tt<ptr->tt_size[index_md];index_tt++) { if (_scalars_) { if ((ppt->has_cl_cmb_temperature == _TRUE_) && ((index_tt == ptr->index_tt_t0) || (index_tt == ptr->index_tt_t1) || (index_tt == ptr->index_tt_t2))) l_max=ppt->l_scalar_max; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e)) l_max=ppt->l_scalar_max; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (index_tt == ptr->index_tt_lcmb)) l_max=ppt->l_scalar_max; if ((_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) || (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens))|| (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) ) l_max=ppt->l_lss_max; if ((ppt->has_cl_lensing_potential == _TRUE_) && (index_tt >= ptr->index_tt_lensing) && (index_tt < ptr->index_tt_lensing+ppt->selection_num)) l_max=ppt->l_lss_max; } if (_tensors_) { l_max = ppt->l_tensor_max; } class_test(l_max > ptr->l[ptr->l_size_max-1], ptr->error_message, "For mode %d, type %d, asked for l_max=%d greater than in Bessel table where l_max=%d", index_md, index_tt, l_max, ptr->l[ptr->l_size_max-1]); index_l=0; while (ptr->l[index_l] < l_max) index_l++; ptr->l_size_tt[index_md][index_tt]=index_l+1; if (ptr->l_size_tt[index_md][index_tt] < ptr->l_size_max) ptr->l_size_tt[index_md][index_tt]++; if (ptr->l_size_tt[index_md][index_tt] < ptr->l_size_max) ptr->l_size_tt[index_md][index_tt]++; ptr->l_size[index_md] = MAX(ptr->l_size[index_md],ptr->l_size_tt[index_md][index_tt]); } } return _SUCCESS_; } /** * This routine defines the number and values of wavenumbers q for * each mode (goes smoothly from logarithmic step for small q's to * linear step for large q's). 
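 *
 * (To make the transition between the two regimes explicit: the step
 * used below is
 *   delta q = q_period * q_linstep * q / (q + q_linstep/q_logstep_spline),
 * so for q << q_linstep/q_logstep_spline it reduces to
 * delta q / q ~ q_period * q_logstep_spline, a constant logarithmic
 * step, while for q >> q_linstep/q_logstep_spline it tends to
 * delta q ~ q_period * q_linstep, a constant linear step.)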
* * @param ppr Input : pointer to precision structure * @param ppt Input : pointer to perturbation structure * @param ptr Input/Output : pointer to transfers structure containing q's * @param rs_rec Input : comoving distance to recombination * @param index_md Input: index of requested mode (scalar, tensor, etc) * @return the error status */ int transfer_get_q_list( struct precision * ppr, struct perturbs * ppt, struct transfers * ptr, double q_period, double K, int sgnK ) { int index_q; double q,q_min=0.,q_max=0.,q_step,k_max; int nu, nu_min, nu_proposed; int q_size_max; double q_approximation; double last_step=0.; int last_index=0; double q_logstep_spline; double q_logstep_trapzd; int index_md; /* first and last value in flat case*/ if (sgnK == 0) { q_min = ppt->k_min; q_max = 0.; for (index_md=0; index_md<ppt->md_size; index_md++) { q_max = MAX(q_max,ppt->k[index_md][ppt->k_size_cl[index_md]-1]); } K=0; } /* first and last value in open case*/ else if (sgnK == -1) { q_min = sqrt(ppt->k_min*ppt->k_min+K); k_max = 0.; for (index_md=0; index_md<ppt->md_size; index_md++) { k_max = MAX(k_max,ppt->k[index_md][ppt->k_size_cl[index_md]-1]); } q_max = sqrt(k_max*k_max+K); if (ppt->has_vectors == _TRUE_) q_max = MIN(q_max,sqrt(k_max*k_max+2.*K)); if (ppt->has_tensors == _TRUE_) q_max = MIN(q_max,sqrt(k_max*k_max+3.*K)); } /* first and last value in closed case*/ else if (sgnK == 1) { nu_min = 3; q_min = nu_min * sqrt(K); q_max = 0.; for (index_md=0; index_md<ppt->md_size; index_md++) { q_max = MAX(q_max,ppt->k[index_md][ppt->k_size_cl[index_md]-1]); } } /* adjust the parameter governing the log step size to curvature */ q_logstep_spline = ppr->q_logstep_spline/pow(ptr->angular_rescaling,ppr->q_logstep_open); q_logstep_trapzd = ppr->q_logstep_trapzd; /* very conservative estimate of number of values */ if (sgnK == 1) { q_approximation = MIN(ppr->hyper_flat_approximation_nu,(q_max/sqrt(K))); /* max contribution from integer nu values */ q_step = 1.+q_period*ppr->q_logstep_trapzd; q_size_max = 2*(int)(log(q_approximation/q_min)/log(q_step)); q_step = q_period*ppr->q_linstep; q_size_max += 2*(int)((q_approximation-q_min)/q_step); /* max contribution from non-integer nu values */ q_step = 1.+q_period*ppr->q_logstep_spline; q_size_max += 2*(int)(log(q_max/q_approximation)/log(q_step)); q_step = q_period*ppr->q_linstep; q_size_max += 2*(int)((q_max-q_approximation)/q_step); } else { /* max contribution from non-integer nu values */ q_step = 1.+q_period*ppr->q_logstep_spline; q_size_max = 5*(int)(log(q_max/q_min)/log(q_step)); q_step = q_period*ppr->q_linstep; q_size_max += 5*(int)((q_max-q_min)/q_step); } /* create array with this conservative size estimate. The exact size will be readjusted below, after filling the array. */ class_alloc(ptr->q, q_size_max*sizeof(double), ptr->error_message); /* assign the first value before starting the loop */ index_q = 0; ptr->q[index_q] = q_min; nu = 3; index_q++; /* loop over the values */ while (ptr->q[index_q-1] < q_max) { class_test(index_q >= q_size_max,ptr->error_message,"buggy q-list definition"); /* step size formula in flat/open case. Step goes gradually from logarithmic to linear: - in the small q limit, it is logarithmic with: (delta q / q) = q_period * q_logstep_spline - in the large q limit, it is linear with: (delta q) = q_period * ppr->q_linstep */ if (sgnK<=0) { q = ptr->q[index_q-1] + q_period * ppr->q_linstep * ptr->q[index_q-1] / (ptr->q[index_q-1] + ppr->q_linstep/q_logstep_spline); } /* step size formula in closed case. 
     Same thing except that:

     - in the small q limit, the logarithmic step is reduced, being
       given by q_logstep_trapzd, and values are rounded to integer
       values of nu=q/sqrt(K). This happens as long as
       nu<nu_flat_approximation

     - for nu>nu_flat_approximation, the step gradually catches up with
       the same expression as in the flat/open case, and there is no
       need to round up to integer nu's. */

    else {

      if (nu < (int)ppr->hyper_flat_approximation_nu) {

        q = ptr->q[index_q-1] + q_period * ppr->q_linstep
          * ptr->q[index_q-1]
          / (ptr->q[index_q-1] + ppr->q_linstep/q_logstep_trapzd);

        nu_proposed = (int)(q/sqrt(K));

        if (nu_proposed <= nu+1)
          nu = nu+1;
        else
          nu = nu_proposed;

        q = nu*sqrt(K);

        last_step = q - ptr->q[index_q-1];
        last_index = index_q+1;
      }
      else {

        q_step = q_period * ppr->q_linstep * ptr->q[index_q-1]
          / (ptr->q[index_q-1] + ppr->q_linstep/q_logstep_spline);

        if (index_q-last_index < (int)ppr->q_numstep_transition)
          q = ptr->q[index_q-1]
            + (1-(double)(index_q-last_index)/ppr->q_numstep_transition) * last_step
            + (double)(index_q-last_index)/ppr->q_numstep_transition * q_step;
        else
          q = ptr->q[index_q-1] + q_step;
      }
    }

    ptr->q[index_q] = q;
    index_q++;
  }

  /* infer total number of values (also checking whether we overshot
     the last point) */

  if (ptr->q[index_q-1] > q_max)
    ptr->q_size=index_q-1;
  else
    ptr->q_size=index_q;

  class_test(ptr->q_size<2,ptr->error_message,"buggy q-list definition");

  //fprintf(stderr,"q_size_max=%d q_size = %d\n",q_size_max,ptr->q_size);
  //fprintf(stderr,"q_size = %d\n",ptr->q_size);

  /* now, readjust array size */

  class_realloc(ptr->q,
                ptr->q,
                ptr->q_size*sizeof(double),
                ptr->error_message);

  /* in a curved universe, check at which index the flat rescaling
     approximation will start being used */

  if (sgnK != 0) {

    q_approximation = ppr->hyper_flat_approximation_nu * sqrt(sgnK*K);

    for (ptr->index_q_flat_approximation=0;
         ptr->index_q_flat_approximation < ptr->q_size-1;
         ptr->index_q_flat_approximation++) {

      if (ptr->q[ptr->index_q_flat_approximation] > q_approximation)
        break;
    }

    if (ptr->transfer_verbose > 1)
      printf("Flat bessel approximation spares hyperspherical bessel computations for %zu wavenumbers over a total of %zu\n",
             ptr->q_size-ptr->index_q_flat_approximation,ptr->q_size);
  }

  return _SUCCESS_;
}

/**
 * This routine infers from the q values a list of corresponding k
 * values for each mode.
* * @param ppt Input : pointer to perturbation structure * @param ptr Input/Output : pointer to transfers structure containing q's * @param K Input : spatial curvature * @return the error status */ int transfer_get_k_list( struct perturbs * ppt, struct transfers * ptr, double K ) { int index_md; int index_q; double m=0.; class_alloc(ptr->k,ptr->md_size*sizeof(double*),ptr->error_message); for (index_md = 0; index_md < ptr->md_size; index_md++) { class_alloc(ptr->k[index_md],ptr->q_size*sizeof(double),ptr->error_message); if (_scalars_) { m=0.; } if (_vectors_) { m=1.; } if (_tensors_) { m=2.; } for (index_q=0; index_q < ptr->q_size; index_q++) { ptr->k[index_md][index_q] = sqrt(ptr->q[index_q]*ptr->q[index_q]-K*(m+1.)); } if (ptr->k[index_md][0] < ppt->k[index_md][0]){ /** If ptr->k[index_md][0] < ppt->k[index_md][0] at the level of rounding, adjust first value of k_list to avoid interpolation errors: */ if ((ppt->k[index_md][0]-ptr->k[index_md][0]) < 10.*DBL_EPSILON){ ptr->k[index_md][0] = ppt->k[index_md][0]; } else{ class_stop(ptr->error_message, "bug in k_list calculation: in perturbation module k_min=%e, in transfer module k_min[mode=%d]=%e, interpolation impossible", ppt->k[0][0], index_md, ptr->k[index_md][0]); } } /** class_test(ptr->k[index_md][0] < ppt->k[index_md][0], ptr->error_message, "bug in k_list calculation: in perturbation module k_min=%e, in transfer module k_min[mode=%d]=%e, interpolation impossible", ppt->k[0][0], index_md, ptr->k[index_md][0]); */ class_test(ptr->k[index_md][ptr->q_size-1] > ppt->k[0][ppt->k_size_cl[0]-1], ptr->error_message, "bug in k_list calculation: in perturbation module k_max=%e, in transfer module k_max[mode=%d]=%e, interpolation impossible", ppt->k[0][ppt->k_size_cl[0]], index_md, ptr->k[index_md][ptr->q_size-1]); } return _SUCCESS_; } /** * This routine defines the correspondence between the sources in the * perturbation and transfer module. * * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfers structure containing l's * @param index_md : Input: index of mode (scalar, tensor...) * @param tp_of_tt : Input/Output: array with the correspondance (allocated before, filled here) * @return the error status */ int transfer_get_source_correspondence( struct perturbs * ppt, struct transfers * ptr, int ** tp_of_tt ) { /* running index on modes */ int index_md; /* running index on transfer types */ int index_tt; /** - which source are we considering? 
Define correspondence between transfer types and source types */ for (index_md = 0; index_md < ptr->md_size; index_md++) { class_alloc(tp_of_tt[index_md],ptr->tt_size[index_md]*sizeof(int),ptr->error_message); for (index_tt=0; index_tt<ptr->tt_size[index_md]; index_tt++) { if (_scalars_) { if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t0)) tp_of_tt[index_md][index_tt]=ppt->index_tp_t0; if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t1)) tp_of_tt[index_md][index_tt]=ppt->index_tp_t1; if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t2)) tp_of_tt[index_md][index_tt]=ppt->index_tp_t2; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e)) tp_of_tt[index_md][index_tt]=ppt->index_tp_p; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (index_tt == ptr->index_tt_lcmb)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi_plus_psi; if (_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) tp_of_tt[index_md][index_tt]=ppt->index_tp_delta_m; if (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd)) tp_of_tt[index_md][index_tt]=ppt->index_tp_theta_m; if (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd)) tp_of_tt[index_md][index_tt]=ppt->index_tp_theta_m; if (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) tp_of_tt[index_md][index_tt]=ppt->index_tp_theta_m; if (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi_plus_psi; if (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr)) tp_of_tt[index_md][index_tt]=ppt->index_tp_psi; if (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi; if (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi_prime; if (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi_plus_psi; if (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi_plus_psi; if ((ppt->has_cl_lensing_potential == _TRUE_) && (index_tt >= ptr->index_tt_lensing) && (index_tt < ptr->index_tt_lensing+ppt->selection_num)) tp_of_tt[index_md][index_tt]=ppt->index_tp_phi_plus_psi; } if (_vectors_) { if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t1)) tp_of_tt[index_md][index_tt]=ppt->index_tp_t1; if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t2)) tp_of_tt[index_md][index_tt]=ppt->index_tp_t2; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e)) tp_of_tt[index_md][index_tt]=ppt->index_tp_p; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_b)) tp_of_tt[index_md][index_tt]=ppt->index_tp_p; } if (_tensors_) { if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t2)) tp_of_tt[index_md][index_tt]=ppt->index_tp_t2; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e)) tp_of_tt[index_md][index_tt]=ppt->index_tp_p; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_b)) tp_of_tt[index_md][index_tt]=ppt->index_tp_p; } } } return _SUCCESS_; } int transfer_free_source_correspondence( struct transfers * ptr, int ** tp_of_tt ) { int index_md; for (index_md = 0; 
index_md < ptr->md_size; index_md++) { free(tp_of_tt[index_md]); } free(tp_of_tt); return _SUCCESS_; } int transfer_source_tau_size_max( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct transfers * ptr, double tau_rec, double tau0, int * tau_size_max ) { int index_md; int index_tt; int tau_size_tt=0; *tau_size_max = 0; for (index_md = 0; index_md < ptr->md_size; index_md++) { for (index_tt = 0; index_tt < ptr->tt_size[index_md]; index_tt++) { class_call(transfer_source_tau_size(ppr, pba, ppt, ptr, tau_rec, tau0, index_md, index_tt, &tau_size_tt), ptr->error_message, ptr->error_message); *tau_size_max = MAX(*tau_size_max,tau_size_tt); } } return _SUCCESS_; } /** * the code makes a distinction between "perturbation sources" * (e.g. gravitational potential) and "transfer sources" (e.g. total * density fluctuations, obtained through the Poisson equation, and * observed with a given selection function). * * This routine computes the number of sampled time values for each type * of transfer sources. * * @param ppr Input : pointer to precision structure * @param pba Input : pointer to background structure * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfers structure * @param tau_rec Input : recombination time * @param tau0 Input : time today * @param index_md Input : index of the mode (scalar, tensor) * @param index_tt Input : index of transfer type * @param tau_size Output: pointer to number of smapled times * @return the error status */ int transfer_source_tau_size( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct transfers * ptr, double tau_rec, double tau0, int index_md, int index_tt, int * tau_size) { /* values of conformal time */ double tau_min,tau_mean,tau_max; /* minimum value of index_tt */ int index_tau_min; /* value of l at which limber approximation is switched on */ int l_limber; /* current redhsift bin number */ int bin=0; /* scalar mode */ if (_scalars_) { /* scalar temperature */ if ((ppt->has_cl_cmb_temperature == _TRUE_) && ((index_tt == ptr->index_tt_t0) || (index_tt == ptr->index_tt_t1) || (index_tt == ptr->index_tt_t2))) *tau_size = ppt->tau_size; /* scalar polarisation */ if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e)) *tau_size = ppt->tau_size; /* cmb lensing potential */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (index_tt == ptr->index_tt_lcmb)) { /* find times before recombination, that will be thrown away */ index_tau_min=0; while (ppt->tau_sampling[index_tau_min]<=tau_rec) index_tau_min++; /* infer number of time steps after removing early times */ *tau_size = ppt->tau_size-index_tau_min; } /* density Cl's */ if ((_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) || (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) ) { /* bin number associated to particular redshift bin and selection function */ if (_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) bin = index_tt - ptr->index_tt_density; if (_index_tt_in_range_(ptr->index_tt_rsd, 
ppt->selection_num, ppt->has_nc_rsd)) bin = index_tt - ptr->index_tt_rsd; if (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd)) bin = index_tt - ptr->index_tt_d0; if (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) bin = index_tt - ptr->index_tt_d1; if (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g1; if (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g2; if (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g3; /* time interval for this bin */ class_call(transfer_selection_times(ppr, pba, ppt, ptr, bin, &tau_min, &tau_mean, &tau_max), ptr->error_message, ptr->error_message); /* case selection=dirac */ if (tau_min == tau_max) { *tau_size = 1; } /* other cases (gaussian, top-hat...) */ else { /* check that selection function well sampled */ *tau_size = (int)ppr->selection_sampling; /* value of l at which the code switches to Limber approximation (necessary for next step) */ l_limber=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[bin]; /* check that bessel well sampled, if not define finer sampling overwriting the previous one. One Bessel oscillations corresponds to [Delta tau]=2pi/k. This is minimal for largest relevant k_max, namely k_max=l_limber/(tau0-tau_mean). We need to cut the interval (tau_max-tau_min) in pieces of size [Delta tau]=2pi/k_max. This gives the number below. */ *tau_size=MAX(*tau_size,(int)((tau_max-tau_min)/((tau0-tau_mean)/l_limber))*ppr->selection_sampling_bessel); } } /* galaxy lensing Cl's, differs from density Cl's since the source function will spread from the selection function region up to tau0 */ if ((_index_tt_in_range_(ptr->index_tt_lensing, ppt->selection_num, ppt->has_cl_lensing_potential)) || (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) || (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) ) { /* bin number associated to particular redshift bin and selection function */ if (_index_tt_in_range_(ptr->index_tt_lensing, ppt->selection_num, ppt->has_cl_lensing_potential)) bin = index_tt - ptr->index_tt_lensing; if (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) bin = index_tt - ptr->index_tt_nc_lens; if (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g4; if (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g5; /* time interval for this bin */ class_call(transfer_selection_times(ppr, pba, ppt, ptr, bin, &tau_min, &tau_mean, &tau_max), ptr->error_message, ptr->error_message); /* check that selection function well sampled */ *tau_size = (int)ppr->selection_sampling; /* value of l at which the code switches to Limber approximation (necessary for next step) */ l_limber=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[bin]; /* check that bessel well sampled, if not define finer sampling overwriting the previous one. One Bessel oscillations corresponds to [Delta tau]=2pi/k. This is minimal for largest relevant k_max, namely k_max=l_limber/((tau0-tau_mean)/2). We need to cut the interval (tau_0-tau_min) in pieces of size [Delta tau]=2pi/k_max. This gives the number below. 
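     (Spelling the count out: one oscillation spans
     Delta tau = 2pi/k_max = pi*(tau0-tau_mean)/l_limber, while the
     formula below cuts the interval in pieces of size
     (tau0-tau_mean)/2/l_limber, i.e. it keeps roughly 2*pi sample
     points per oscillation, before the extra oversampling factor
     selection_sampling_bessel.)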
*/ *tau_size=MAX(*tau_size,(int)((tau0-tau_min)/((tau0-tau_mean)/2./l_limber))*ppr->selection_sampling_bessel); } } /* tensor mode */ if (_tensors_) { /* for all tensor types */ *tau_size = ppt->tau_size; } return _SUCCESS_; } int transfer_compute_for_each_q( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct transfers * ptr, int ** tp_of_tt, int index_q, int tau_size_max, double tau_rec, double *** pert_sources, double *** pert_sources_spline, struct transfer_workspace * ptw ) { /** Summary: */ /** - define local variables */ /* running index for modes */ int index_md; /* running index for initial conditions */ int index_ic; /* running index for transfer types */ int index_tt; /* running index for multipoles */ int index_l; /* we deal with workspaces, i.e. with contiguous memory zones (one per thread) containing various fields used by the integration routine */ /* - first workspace field: perturbation source interpolated from perturbation stucture */ double * interpolated_sources; /* - second workspace field: list of tau0-tau values, tau0_minus_tau[index_tau] */ double * tau0_minus_tau; /* - third workspace field: list of trapezoidal weights for integration over tau */ double * w_trapz; /* - fourth workspace field, containing just a double: number of time values */ int * tau_size; /* - fifth workspace field, identical to above interpolated sources: sources[index_tau] */ double * sources; /** - for a given l, maximum value of k such that we can convolve the source with Bessel functions j_l(x) without reaching x_max */ double q_max_bessel; /* a value of index_type */ int previous_type; double l; short neglect; radial_function_type radial_type; /** store the sources in the workspace and define all fields in this workspace */ interpolated_sources = ptw->interpolated_sources; tau0_minus_tau = ptw->tau0_minus_tau; w_trapz = ptw->w_trapz; tau_size = &(ptw->tau_size); sources = ptw->sources; /** - loop over all modes. For each mode: */ for (index_md = 0; index_md < ptr->md_size; index_md++) { /* if we reached q_max for this mode, there is nothing to be done */ if (ptr->k[index_md][index_q] <= ppt->k[index_md][ppt->k_size_cl[index_md]-1]) { /** - loop over initial conditions. For each of them: */ for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { /* initialize the previous type index */ previous_type=-1; /** - loop over types. For each of them: */ for (index_tt = 0; index_tt < ptr->tt_size[index_md]; index_tt++) { /** check if we must now deal with a new source with a new index ppt->index_type. If yes, interpolate it at the right values of k. */ if (tp_of_tt[index_md][index_tt] != previous_type) { class_call(transfer_interpolate_sources(ppt, ptr, index_q, index_md, index_ic, tp_of_tt[index_md][index_tt], pert_sources[index_md][index_ic * ppt->tp_size[index_md] + tp_of_tt[index_md][index_tt]], pert_sources_spline[index_md][index_ic * ppt->tp_size[index_md] + tp_of_tt[index_md][index_tt]], interpolated_sources), ptr->error_message, ptr->error_message); } previous_type = tp_of_tt[index_md][index_tt]; /* the code makes a distinction between "perturbation sources" (e.g. gravitational potential) and "transfer sources" (e.g. total density fluctuations, obtained through the Poisson equation, and observed with a given selection function). The next routine computes the transfer source given the interpolated perturbation source, and copies it in the workspace. 
*/ class_call(transfer_sources(ppr, pba, ppt, ptr, interpolated_sources, tau_rec, index_q, index_md, index_tt, sources, tau0_minus_tau, w_trapz, tau_size), ptr->error_message, ptr->error_message); /* now that the array of times tau0_minus_tau is known, we can infer the arry of radial coordinates r(tau0_minus_tau) as well as a few other quantities related by trigonometric functions */ class_call(transfer_radial_coordinates(ptr,ptw,index_md,index_q), ptr->error_message, ptr->error_message); /** Select radial function type: */ class_call(transfer_select_radial_function( ppt, ptr, index_md, index_tt, &radial_type), ptr->error_message, ptr->error_message); for (index_l = 0; index_l < ptr->l_size[index_md]; index_l++) { l = (double)ptr->l[index_l]; /* neglect transfer function when l is much smaller than k*tau0 */ class_call(transfer_can_be_neglected(ppr, ppt, ptr, index_md, index_ic, index_tt, (pba->conformal_age-tau_rec)*ptr->angular_rescaling, ptr->q[index_q], l, &neglect), ptr->error_message, ptr->error_message); /* for K>0 (closed), transfer functions only defined for l<nu */ if ((ptw->sgnK == 1) && (ptr->l[index_l] >= (int)(ptr->q[index_q]/sqrt(ptw->K)+0.2))) { neglect = _TRUE_; } /* This would maybe go into transfer_can_be_neglected later: */ if ((ptw->sgnK != 0) && (index_l>=ptw->HIS.l_size) && (index_q < ptr->index_q_flat_approximation)) { neglect = _TRUE_; } if (neglect == _TRUE_) { ptr->transfer[index_md][((index_ic * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q] = 0.; } else { /* for a given l, maximum value of k such that we can convolve the source with Bessel functions j_l(x) without reaching x_max (this is relevant in the flat case when the bessels are compiuted with the old bessel module. otherwise this condition is guaranteed by the choice of proper xmax when computing bessels) */ if (ptw->sgnK == 0) { q_max_bessel = ptw->pBIS->x[ptw->pBIS->x_size-1]/tau0_minus_tau[0]; } else { q_max_bessel = ptr->q[ptr->q_size-1]; } /* neglect late time CMB sources when l is above threshold */ class_call(transfer_late_source_can_be_neglected(ppr, ppt, ptr, index_md, index_tt, l, &(ptw->neglect_late_source)), ptr->error_message, ptr->error_message); /* compute the transfer function for this l */ class_call(transfer_compute_for_each_l( ptw, ppr, ppt, ptr, index_q, index_md, index_ic, index_tt, index_l, l, q_max_bessel, radial_type ), ptr->error_message, ptr->error_message); } } /* end of loop over l */ } /* end of loop over type */ } /* end of loop over initial condition */ } else { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_tt = 0; index_tt < ptr->tt_size[index_md]; index_tt++) { for (index_l = 0; index_l < ptr->l_size[index_md]; index_l++) { ptr->transfer[index_md][((index_ic * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q] = 0.; } } } } } /* end of loop over mode */ return _SUCCESS_; } int transfer_radial_coordinates( struct transfers * ptr, struct transfer_workspace * ptw, int index_md, int index_q ) { int index_tau; double sqrt_absK=0.; switch (ptw->sgnK){ case 1: sqrt_absK = sqrt(ptw->K); for (index_tau=0; index_tau < ptw->tau_size; index_tau++) { ptw->chi[index_tau] = sqrt_absK*ptw->tau0_minus_tau[index_tau]; ptw->cscKgen[index_tau] = sqrt_absK/ptr->k[index_md][index_q]/sin(ptw->chi[index_tau]); ptw->cotKgen[index_tau] = ptw->cscKgen[index_tau]*cos(ptw->chi[index_tau]); } break; case 0: for (index_tau=0; index_tau < ptw->tau_size; index_tau++) { 
ptw->chi[index_tau] = ptr->k[index_md][index_q] * ptw->tau0_minus_tau[index_tau]; ptw->cscKgen[index_tau] = 1.0/ptw->chi[index_tau]; ptw->cotKgen[index_tau] = 1.0/ptw->chi[index_tau]; } break; case -1: sqrt_absK = sqrt(-ptw->K); for (index_tau=0; index_tau < ptw->tau_size; index_tau++) { ptw->chi[index_tau] = sqrt_absK*ptw->tau0_minus_tau[index_tau]; ptw->cscKgen[index_tau] = sqrt_absK/ptr->k[index_md][index_q]/sinh(ptw->chi[index_tau]); ptw->cotKgen[index_tau] = ptw->cscKgen[index_tau]*cosh(ptw->chi[index_tau]); } break; } return _SUCCESS_; } /** * This routine interpolates sources \f$ S(k, \tau) \f$ for each mode, * initial condition and type (of perturbation module), to get them at * the right values of k, using the spline interpolation method. * * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfers structure * @param index_md Input : index of mode * @param index_ic Input : index of initial condition * @param index_type Input : index of type of source (in perturbation module) * @param pert_source Input : array of sources * @param pert_source_spline Input : array of second derivative of sources * @param interpolated_sources Output: array of interpolated sources (filled here but allocated in transfer_init() to avoid numerous reallocation) * @return the error status */ int transfer_interpolate_sources( struct perturbs * ppt, struct transfers * ptr, int index_q, int index_md, int index_ic, int index_type, double * pert_source, /* array with argument pert_source[index_tau*ppt->k_size[index_md]+index_k] (must be allocated) */ double * pert_source_spline, /* array with argument pert_source_spline[index_tau*ppt->k_size[index_md]+index_k] (must be allocated) */ double * interpolated_sources /* array with argument interpolated_sources[index_q*ppt->tau_size+index_tau] (must be allocated) */ ) { /** Summary: */ /** - define local variables */ /* index running on k values in the original source array */ int index_k; /* index running on time */ int index_tau; /* variables used for spline interpolation algorithm */ double h, a, b; /** - interpolate at each k value using the usual spline interpolation algorithm. */ index_k = 0; h = ppt->k[index_md][index_k+1] - ppt->k[index_md][index_k]; while (((index_k+1) < ppt->k_size[index_md]) && (ppt->k[index_md][index_k+1] < ptr->k[index_md][index_q])) { index_k++; h = ppt->k[index_md][index_k+1] - ppt->k[index_md][index_k]; } class_test(h==0., ptr->error_message, "stop to avoid division by zero"); b = (ptr->k[index_md][index_q] - ppt->k[index_md][index_k])/h; a = 1.-b; for (index_tau = 0; index_tau < ppt->tau_size; index_tau++) { interpolated_sources[index_tau] = a * pert_source[index_tau*ppt->k_size[index_md]+index_k] + b * pert_source[index_tau*ppt->k_size[index_md]+index_k+1] + ((a*a*a-a) * pert_source_spline[index_tau*ppt->k_size[index_md]+index_k] +(b*b*b-b) * pert_source_spline[index_tau*ppt->k_size[index_md]+index_k+1])*h*h/6.0; } return _SUCCESS_; } /** * the code makes a distinction between "perturbation sources" * (e.g. gravitational potential) and "transfer sources" (e.g. total * density fluctuations, obtained through the Poisson equation, and * observed with a given selection function). * * This routine computes the transfer source given the interpolated * perturbation source, and copies it in the workspace. 
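 *
 * (Illustration of the distinction: for the number-count spectra the
 * interpolated perturbation source, e.g. delta_m, is resampled on the
 * time grid of the redshift bin and multiplied by the normalized
 * selection function, while for the CMB lensing potential the source
 * phi+psi is multiplied by the lensing kernel defined further below.)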
* * @param ppr Input : pointer to precision structure * @param pba Input : pointer to background structure * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfers structure * @param interpolated_sources Input : interpolated perturbation source * @param tau_rec Input : recombination time * @param index_md Input : index of mode * @param index_tt Input : index of type of (transfer) source * @param sources Output: transfer source * @param tau0_minus_tau Output: values of (tau0-tau) at which source are sample * @param w_trapz Output: trapezoidal weights for integration over tau * @param tau_size_double Output: pointer to size of previous two arrays, converted to double * @return the error status */ int transfer_sources( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct transfers * ptr, double * interpolated_sources, double tau_rec, int index_q, int index_md, int index_tt, double * sources, double * tau0_minus_tau, double * w_trapz, int * tau_size_out ) { /** Summary: */ /** - define local variables */ /* index running on time */ int index_tau; /* bin for computation of cl_density */ int bin=0; /* number of tau values */ int tau_size; /* minimum tau index kept in transfer sources */ int index_tau_min; /* for calling background_at_eta */ int last_index; double * pvecback = NULL; /* conformal time */ double tau, tau0; /* rescaling factor depending on the background at a given time */ double rescaling=0.; /* flag: is there any difference between the perturbation and transfer source? */ short redefine_source; /* array of selection function values at different times */ double * selection; /* array of time sampling for lensing source selection function */ double * tau0_minus_tau_lensing_sources; /* trapezoidal weights for lensing source selection function */ double * w_trapz_lensing_sources; /* index running on time in previous two arrays */ int index_tau_sources; /* number of time values in previous two arrays */ int tau_sources_size; /* source evolution factor */ double f_evo = 0.; /* when the selection function is multiplied by a function dNdz */ double z; double dNdz; double dln_dNdz_dz; /* in which cases are perturbation and transfer sources are different? I.e., in which case do we need to mutiply the sources by some background and/or window function, and eventually to resample it, or redfine its time limits? 
  */

  redefine_source = _FALSE_;

  if (_scalars_) {

    /* cmb lensing potential */
    if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (index_tt == ptr->index_tt_lcmb))
      redefine_source = _TRUE_;

    /* number count Cl's */
    if ((_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) ||
        (_index_tt_in_range_(ptr->index_tt_rsd,     ppt->selection_num, ppt->has_nc_rsd)) ||
        (_index_tt_in_range_(ptr->index_tt_d0,      ppt->selection_num, ppt->has_nc_rsd)) ||
        (_index_tt_in_range_(ptr->index_tt_d1,      ppt->selection_num, ppt->has_nc_rsd)) ||
        (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) ||
        (_index_tt_in_range_(ptr->index_tt_nc_g1,   ppt->selection_num, ppt->has_nc_gr)) ||
        (_index_tt_in_range_(ptr->index_tt_nc_g2,   ppt->selection_num, ppt->has_nc_gr)) ||
        (_index_tt_in_range_(ptr->index_tt_nc_g3,   ppt->selection_num, ppt->has_nc_gr)) ||
        (_index_tt_in_range_(ptr->index_tt_nc_g4,   ppt->selection_num, ppt->has_nc_gr)) ||
        (_index_tt_in_range_(ptr->index_tt_nc_g5,   ppt->selection_num, ppt->has_nc_gr)))
      redefine_source = _TRUE_;

    /* galaxy lensing potential */
    if ((ppt->has_cl_lensing_potential == _TRUE_) &&
        (index_tt >= ptr->index_tt_lensing) &&
        (index_tt < ptr->index_tt_lensing+ppt->selection_num))
      redefine_source = _TRUE_;
  }

  /* conformal time today */
  tau0 = pba->conformal_age;

  /* case where we need to redefine the source by a window function (or
     any function of the background and of k) */
  if (redefine_source == _TRUE_) {

    class_call(transfer_source_tau_size(ppr, pba, ppt, ptr, tau_rec, tau0, index_md, index_tt, &tau_size),
               ptr->error_message,
               ptr->error_message);

    if (_scalars_) {

      /* lensing source: throw away times before recombination, and
         multiply psi by the window function */

      if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (index_tt == ptr->index_tt_lcmb)) {

        /* first time step after removing early times */
        index_tau_min = ppt->tau_size - tau_size;

        /* loop over time and rescale */
        for (index_tau = index_tau_min; index_tau < ppt->tau_size; index_tau++) {

          /* conformal time */
          tau = ppt->tau_sampling[index_tau];

          /* lensing source = - W(tau) (phi(k,tau) + psi(k,tau)) Heaviside(tau-tau_rec)

             with psi,phi = metric perturbations in Newtonian gauge
                  (phi+psi = Phi_A-Phi_H of Bardeen)

             W = (tau-tau_rec)/(tau_0-tau)/(tau_0-tau_rec)

             H(x) = Heaviside
             (in tau = tau_0, set source = 0 to avoid division by zero;
             regulated anyway by Bessel).
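
             For instance, halfway between recombination and today,
             tau = (tau_rec+tau_0)/2, this kernel equals 1/(tau_0-tau_rec);
             it then grows and formally diverges as tau -> tau_0, which is
             why the last sampled point is set to zero below.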
*/ if (index_tau == ppt->tau_size-1) { rescaling=0.; } else { rescaling = (tau_rec-tau)/(tau0-tau)/(tau0-tau_rec); } /* copy from input array to output array */ sources[index_tau-index_tau_min] = interpolated_sources[index_tau] * rescaling * ptr->lcmb_rescale * pow(ptr->k[index_md][index_q]/ptr->lcmb_pivot,ptr->lcmb_tilt); /* store value of (tau0-tau) */ tau0_minus_tau[index_tau-index_tau_min] = tau0 - tau; } /* Compute trapezoidal weights for integration over tau */ class_call(array_trapezoidal_mweights(tau0_minus_tau, tau_size, w_trapz, ptr->error_message), ptr->error_message, ptr->error_message); } /* density source: redefine the time sampling, multiply by coefficient of Poisson equation, and multiply by selection function */ if ((_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) || (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) || (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) ) { /* bin number associated to particular redshift bin and selection function */ if (_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density)) bin = index_tt - ptr->index_tt_density; if (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd)) bin = index_tt - ptr->index_tt_rsd; if (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd)) bin = index_tt - ptr->index_tt_d0; if (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) bin = index_tt - ptr->index_tt_d1; if (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g1; if (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g2; if (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g3; /* allocate temporary arrays for storing sources and for calling background */ class_alloc(selection,tau_size*sizeof(double),ptr->error_message); class_alloc(pvecback,pba->bg_size*sizeof(double),ptr->error_message); /* redefine the time sampling */ class_call(transfer_selection_sampling(ppr, pba, ppt, ptr, bin, tau0_minus_tau, tau_size), ptr->error_message, ptr->error_message); /* resample the source at those times */ class_call(transfer_source_resample(ppr, pba, ppt, ptr, bin, tau0_minus_tau, tau_size, index_md, tau0, interpolated_sources, sources), ptr->error_message, ptr->error_message); /* Compute trapezoidal weights for integration over tau */ class_call(array_trapezoidal_mweights(tau0_minus_tau, tau_size, w_trapz, ptr->error_message), ptr->error_message, ptr->error_message); /* compute values of selection function at sampled values of tau */ class_call(transfer_selection_compute(ppr, pba, ppt, ptr, selection, tau0_minus_tau, w_trapz, tau_size, pvecback, tau0, bin), ptr->error_message, ptr->error_message); /* loop over time and rescale */ for (index_tau = 0; index_tau < tau_size; index_tau++) { /* conformal time */ tau = tau0 - tau0_minus_tau[index_tau]; /* corresponding background quantities */ class_call(background_at_tau(pba, tau, pba->long_info, pba->inter_normal, &last_index, pvecback), 
                     pba->error_message,
                     ptr->error_message);

          /* Source evolution, used by the number count rsd and number
             count gravity terms */

          if ((_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) ||
              (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr))) {

            if ((ptr->has_nz_evo_file == _TRUE_) || (ptr->has_nz_evo_analytic == _TRUE_)) {

              f_evo = 2./pvecback[pba->index_bg_H]/pvecback[pba->index_bg_a]/tau0_minus_tau[index_tau]
                + pvecback[pba->index_bg_H_prime]/pvecback[pba->index_bg_H]/pvecback[pba->index_bg_H]/pvecback[pba->index_bg_a];

              z = pba->a_today/pvecback[pba->index_bg_a]-1.;

              if (ptr->has_nz_evo_file == _TRUE_) {

                class_test((z<ptr->nz_evo_z[0]) || (z>ptr->nz_evo_z[ptr->nz_evo_size-1]),
                           ptr->error_message,
                           "Your input file for the selection function only covers the redshift range [%f : %f]. However, your input for the selection function requires z=%f",
                           ptr->nz_evo_z[0],
                           ptr->nz_evo_z[ptr->nz_evo_size-1],
                           z);

                class_call(array_interpolate_spline(ptr->nz_evo_z,
                                                    ptr->nz_evo_size,
                                                    ptr->nz_evo_dlog_nz,
                                                    ptr->nz_evo_dd_dlog_nz,
                                                    1,
                                                    z,
                                                    &last_index,
                                                    &dln_dNdz_dz,
                                                    1,
                                                    ptr->error_message),
                           ptr->error_message,
                           ptr->error_message);
              }
              else {

                class_call(transfer_dNdz_analytic(ptr, z, &dNdz, &dln_dNdz_dz),
                           ptr->error_message,
                           ptr->error_message);
              }

              f_evo -= dln_dNdz_dz/pvecback[pba->index_bg_a];
            }
            else {
              f_evo = 0.;
            }
          }

          /* matter density source = [- (dz/dtau) W(z)] * delta_m(k,tau)
             = W(tau) delta_m(k,tau)

             with delta_m = total matter perturbation (defined in a
             gauge-independent way, see arXiv 1307.1459)

             W(z) = redshift space selection function = dN/dz
             W(tau) = same wrt conformal time = dN/dtau

             (in tau = tau_0, set source = 0 to avoid division by zero;
             regulated anyway by Bessel). */

          if (_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density))
            rescaling = ptr->bias*selection[index_tau];

          /* redshift space distortion source
             = - [- (dz/dtau) W(z)] * (k/H) * theta(k,tau) */

          if (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd))
            rescaling = selection[index_tau]/pvecback[pba->index_bg_H]/pvecback[pba->index_bg_a];

          if (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd))
            rescaling = -3.*selection[index_tau]*pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]
              /ptr->k[index_md][index_q]/ptr->k[index_md][index_q];

          if (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd))
            rescaling = selection[index_tau]*(1.
                                              +pvecback[pba->index_bg_H_prime]
                                              /pvecback[pba->index_bg_a]
                                              /pvecback[pba->index_bg_H]
                                              /pvecback[pba->index_bg_H]
                                              +(2.-5.*ptr->s_bias)
                                              /tau0_minus_tau[index_tau]
                                              /pvecback[pba->index_bg_a]
                                              /pvecback[pba->index_bg_H]
                                              +5.*ptr->s_bias
                                              -f_evo
                                              )/ptr->k[index_md][index_q];

          if (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr))
            rescaling = selection[index_tau];

          if (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr))
            rescaling = -selection[index_tau]*(3.
+pvecback[pba->index_bg_H_prime] /pvecback[pba->index_bg_a] /pvecback[pba->index_bg_H] /pvecback[pba->index_bg_H] +(2.-5.*ptr->s_bias) /tau0_minus_tau[index_tau] /pvecback[pba->index_bg_a] /pvecback[pba->index_bg_H] -f_evo ); if (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr)) rescaling = selection[index_tau]/pvecback[pba->index_bg_a]/pvecback[pba->index_bg_H]; sources[index_tau] *= rescaling; } /* deallocate temporary arrays */ free(pvecback); free(selection); } /* lensing potential: eliminate early times, and multiply by selection function */ if ((_index_tt_in_range_(ptr->index_tt_lensing, ppt->selection_num, ppt->has_cl_lensing_potential)) || (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) || (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) || (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) ) { /* bin number associated to particular redshift bin and selection function */ if (_index_tt_in_range_(ptr->index_tt_lensing, ppt->selection_num, ppt->has_cl_lensing_potential)) bin = index_tt - ptr->index_tt_lensing; if (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) bin = index_tt - ptr->index_tt_nc_lens; if (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g4; if (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) bin = index_tt - ptr->index_tt_nc_g5; /* allocate temporary arrays for storing sources and for calling background */ class_alloc(pvecback, pba->bg_size*sizeof(double), ptr->error_message); /* dirac case */ if (ppt->selection == dirac) { tau_sources_size=1; } /* other cases (gaussian, tophat...) */ else { tau_sources_size=ppr->selection_sampling; } class_alloc(selection, tau_sources_size*sizeof(double), ptr->error_message); class_alloc(tau0_minus_tau_lensing_sources, tau_sources_size*sizeof(double), ptr->error_message); class_alloc(w_trapz_lensing_sources, tau_sources_size*sizeof(double), ptr->error_message); /* time sampling for source selection function */ class_call(transfer_selection_sampling(ppr, pba, ppt, ptr, bin, tau0_minus_tau_lensing_sources, tau_sources_size), ptr->error_message, ptr->error_message); /* Compute trapezoidal weights for integration over tau */ class_call(array_trapezoidal_mweights(tau0_minus_tau_lensing_sources, tau_sources_size, w_trapz_lensing_sources, ptr->error_message), ptr->error_message, ptr->error_message); /* compute values of selection function at sampled values of tau */ class_call(transfer_selection_compute(ppr, pba, ppt, ptr, selection, tau0_minus_tau_lensing_sources, w_trapz_lensing_sources, tau_sources_size, pvecback, tau0, bin), ptr->error_message, ptr->error_message); /* redefine the time sampling */ class_call(transfer_lensing_sampling(ppr, pba, ppt, ptr, bin, tau0, tau0_minus_tau, tau_size), ptr->error_message, ptr->error_message); /* resample the source at those times */ class_call(transfer_source_resample(ppr, pba, ppt, ptr, bin, tau0_minus_tau, tau_size, index_md, tau0, interpolated_sources, sources), ptr->error_message, ptr->error_message); /* Compute trapezoidal weights for integration over tau */ class_call(array_trapezoidal_mweights(tau0_minus_tau, tau_size, w_trapz, ptr->error_message), ptr->error_message, ptr->error_message); /* loop over time and rescale */ for (index_tau = 0; index_tau < tau_size; index_tau++) { /* lensing source = - W(tau) (phi(k,tau) + psi(k,tau)) 
             Heaviside(tau-tau_rec)

             with psi,phi = metric perturbations in Newtonian gauge
                  (phi+psi = Phi_A-Phi_H of Bardeen)

             W = (tau-tau_rec)/(tau_0-tau)/(tau_0-tau_rec)

             H(x) = Heaviside
             (in tau = tau_0, set source = 0 to avoid division by zero;
             regulated anyway by Bessel). */

          if (index_tau == tau_size-1) {
            rescaling=0.;
          }
          else {

            rescaling = 0.;

            for (index_tau_sources=0; index_tau_sources < tau_sources_size; index_tau_sources++) {

              /* condition for excluding from the sum the sources located at z=0 */
              if ((tau0_minus_tau_lensing_sources[index_tau_sources] > 0.) &&
                  (tau0_minus_tau_lensing_sources[index_tau_sources]-tau0_minus_tau[index_tau] > 0.)) {

                if (_index_tt_in_range_(ptr->index_tt_lensing, ppt->selection_num, ppt->has_cl_lensing_potential)) {

                  rescaling += (2.-5.*ptr->s_bias)/2.
                    *(tau0_minus_tau[index_tau]-tau0_minus_tau_lensing_sources[index_tau_sources])
                    /tau0_minus_tau[index_tau]
                    /tau0_minus_tau_lensing_sources[index_tau_sources]
                    * selection[index_tau_sources]
                    * w_trapz_lensing_sources[index_tau_sources];
                }

                if (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens)) {

                  rescaling += -(2.-5.*ptr->s_bias)/2.
                    *(tau0_minus_tau[index_tau]-tau0_minus_tau_lensing_sources[index_tau_sources])
                    /tau0_minus_tau[index_tau]
                    /tau0_minus_tau_lensing_sources[index_tau_sources]
                    * selection[index_tau_sources]
                    * w_trapz_lensing_sources[index_tau_sources];
                }

                if (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr)) {

                  rescaling += (2.-5.*ptr->s_bias)
                    /tau0_minus_tau_lensing_sources[index_tau_sources]
                    * selection[index_tau_sources]
                    * w_trapz_lensing_sources[index_tau_sources];
                }

                if (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) {

                  /* background quantities at time tau_lensing_source */
                  class_call(background_at_tau(pba,
                                               tau0-tau0_minus_tau_lensing_sources[index_tau_sources],
                                               pba->long_info,
                                               pba->inter_normal,
                                               &last_index,
                                               pvecback),
                             pba->error_message,
                             ptr->error_message);

                  /* source evolution at time tau_lensing_source */
                  if ((ptr->has_nz_evo_file == _TRUE_) || (ptr->has_nz_evo_analytic == _TRUE_)) {

                    f_evo = 2./pvecback[pba->index_bg_H]/pvecback[pba->index_bg_a]/tau0_minus_tau[index_tau]
                      + pvecback[pba->index_bg_H_prime]/pvecback[pba->index_bg_H]/pvecback[pba->index_bg_H]/pvecback[pba->index_bg_a];

                    z = pba->a_today/pvecback[pba->index_bg_a]-1.;

                    if (ptr->has_nz_evo_file == _TRUE_) {

                      class_test((z<ptr->nz_evo_z[0]) || (z>ptr->nz_evo_z[ptr->nz_evo_size-1]),
                                 ptr->error_message,
                                 "Your input file for the selection function only covers the redshift range [%f : %f]. However, your input for the selection function requires z=%f",
                                 ptr->nz_evo_z[0],
                                 ptr->nz_evo_z[ptr->nz_evo_size-1],
                                 z);

                      class_call(array_interpolate_spline(ptr->nz_evo_z,
                                                          ptr->nz_evo_size,
                                                          ptr->nz_evo_dlog_nz,
                                                          ptr->nz_evo_dd_dlog_nz,
                                                          1,
                                                          z,
                                                          &last_index,
                                                          &dln_dNdz_dz,
                                                          1,
                                                          ptr->error_message),
                                 ptr->error_message,
                                 ptr->error_message);
                    }
                    else {

                      class_call(transfer_dNdz_analytic(ptr, z, &dNdz, &dln_dNdz_dz),
                                 ptr->error_message,
                                 ptr->error_message);
                    }

                    f_evo -= dln_dNdz_dz/pvecback[pba->index_bg_a];
                  }
                  else {
                    f_evo = 0.;
                  }

                  rescaling += (1.
                                + pvecback[pba->index_bg_H_prime]
                                /pvecback[pba->index_bg_a]
                                /pvecback[pba->index_bg_H]
                                /pvecback[pba->index_bg_H]
                                + (2.-5.*ptr->s_bias)
                                /tau0_minus_tau_lensing_sources[index_tau_sources]
                                /pvecback[pba->index_bg_a]
                                /pvecback[pba->index_bg_H]
                                + 5.*ptr->s_bias
                                - f_evo)
                    * ptr->k[index_md][index_q]
                    * selection[index_tau_sources]
                    * w_trapz_lensing_sources[index_tau_sources];
                }
              }
            }
          }

          /* copy from input array to output array */
          sources[index_tau] *= rescaling;
        }

        /* deallocate temporary arrays */
        free(pvecback);
        free(selection);
        free(tau0_minus_tau_lensing_sources);
        free(w_trapz_lensing_sources);
      }
    }
  }

  /* case where we do not need to redefine */
  else {

    /* number of sampled time values */
    tau_size = ppt->tau_size;

    /* plain copy from input array to output array */
    memcpy(sources, interpolated_sources, ppt->tau_size*sizeof(double));

    /* store values of (tau0-tau) */
    for (index_tau=0; index_tau < ppt->tau_size; index_tau++) {
      tau0_minus_tau[index_tau] = tau0 - ppt->tau_sampling[index_tau];
    }

    /* compute trapezoidal weights for integration over tau */
    class_call(array_trapezoidal_mweights(tau0_minus_tau, tau_size, w_trapz, ptr->error_message),
               ptr->error_message,
               ptr->error_message);
  }

  /* return the tau_size value that will be stored in the workspace */
  *tau_size_out = tau_size;

  return _SUCCESS_;
}

/**
 * Arbitrarily normalized selection function dN/dz(z,bin).
 *
 * @param ppr       Input : pointer to precision structure
 * @param ppt       Input : pointer to perturbation structure
 * @param ptr       Input : pointer to transfers structure
 * @param bin       Input : redshift bin number
 * @param z         Input : one value of redshift
 * @param selection Output: pointer to selection function
 * @return the error status
 */

int transfer_selection_function(
                                struct precision * ppr,
                                struct perturbs * ppt,
                                struct transfers * ptr,
                                int bin,
                                double z,
                                double * selection) {

  double x;
  double dNdz;
  double dln_dNdz_dz;
  int last_index;

  /* trivial dirac case */
  if (ppt->selection==dirac) {
    *selection=1.;
    return _SUCCESS_;
  }

  /* difference between z and the bin center (we can take the absolute
     value as long as all selection functions are symmetric around x=0) */
  x=fabs(z-ppt->selection_mean[bin]);

  /* gaussian case (the function is anyway normalized later
     automatically, but we could not resist normalizing it already here) */
  if (ppt->selection==gaussian) {

    *selection = exp(-0.5*pow(x/ppt->selection_width[bin],2))
      /ppt->selection_width[bin]/sqrt(2.*_PI_);

    if ((ptr->has_nz_file == _TRUE_) || (ptr->has_nz_analytic == _TRUE_)) {

      if (ptr->has_nz_file == _TRUE_) {

        class_test((z<ptr->nz_z[0]) || (z>ptr->nz_z[ptr->nz_size-1]),
                   ptr->error_message,
                   "Your input file for the selection function only covers the redshift range [%f : %f]. However, your input for the selection function requires z=%f",
                   ptr->nz_z[0],
                   ptr->nz_z[ptr->nz_size-1],
                   z);

        class_call(array_interpolate_spline(ptr->nz_z,
                                            ptr->nz_size,
                                            ptr->nz_nz,
                                            ptr->nz_ddnz,
                                            1,
                                            z,
                                            &last_index,
                                            &dNdz,
                                            1,
                                            ptr->error_message),
                   ptr->error_message,
                   ptr->error_message);
      }
      else {

        class_call(transfer_dNdz_analytic(ptr, z, &dNdz, &dln_dNdz_dz),
                   ptr->error_message,
                   ptr->error_message);
      }

      *selection *= dNdz;
    }

    return _SUCCESS_;
  }

  /* top-hat case, with smoothed edges. The problem with sharp edges is
     that the final result will be affected by random noise. Indeed, the
     values of k at which the transfer functions Delta_l(k) are sampled
     will never coincide with the actual edges of the true transfer
     function (computed with or even without the Limber approximation).
     Hence the integral Cl=\int dk Delta_l(k)**2 (...)
     will be imprecise and will fluctuate randomly with the resolution
     along k. With smooth edges, the problem is solved, and the final Cls
     become only mildly dependent on the resolution along k. */
  if (ppt->selection==tophat) {

    /* selection function, centered on z=mean (i.e. on x=0), equal to one
       around x=0, with a tanh step centered on x=width, of width
       delta x = 0.1*width */
    *selection=(1.-tanh((x-ppt->selection_width[bin])/(ppr->selection_tophat_edge*ppt->selection_width[bin])))/2.;

    if ((ptr->has_nz_file == _TRUE_) || (ptr->has_nz_analytic == _TRUE_)) {

      if (ptr->has_nz_file == _TRUE_) {

        class_call(array_interpolate_spline(ptr->nz_z,
                                            ptr->nz_size,
                                            ptr->nz_nz,
                                            ptr->nz_ddnz,
                                            1,
                                            z,
                                            &last_index,
                                            &dNdz,
                                            1,
                                            ptr->error_message),
                   ptr->error_message,
                   ptr->error_message);
      }
      else {

        class_call(transfer_dNdz_analytic(ptr, z, &dNdz, &dln_dNdz_dz),
                   ptr->error_message,
                   ptr->error_message);
      }

      *selection *= dNdz;
    }

    return _SUCCESS_;
  }

  /* get here only if the selection type was not recognized */
  class_stop(ptr->error_message,
             "invalid choice of selection function");

  return _SUCCESS_;
}

/**
 * Analytic form for the dNdz distribution, from arXiv:1004.4640.
 *
 * @param ptr          Input: pointer to transfer structure
 * @param z            Input: redshift
 * @param dNdz         Output: density per redshift, dN/dz
 * @param dln_dNdz_dz  Output: dln(dN/dz)/dz, used optionally for the source evolution
 * @return the error status
 */

int transfer_dNdz_analytic(
                           struct transfers * ptr,
                           double z,
                           double * dNdz,
                           double * dln_dNdz_dz) {

  /* Implement here your favorite analytic ansatz for the selection
     function. A typical function for a photometric sample:
     dN/dz = (z/z0)^alpha exp[-(z/z0)^beta].
     Then: dln(dN/dz)/dz = (alpha - beta*(z/z0)^beta)/z.

     In principle, one is free to use different ansatz for the selection
     function and the evolution function.
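
     Derivation of the dln(dN/dz)/dz expression above:
     ln(dN/dz) = alpha*ln(z/z0) - (z/z0)^beta, hence
     dln(dN/dz)/dz = alpha/z - (beta/z)*(z/z0)^beta
                   = (alpha - beta*(z/z0)^beta)/z.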
     Since the selection function uses only dN/dz, while the evolution
     uses only dln(dN/dz)/dz, it is possible to use different functions
     for dN/dz and dln(dN/dz)/dz. */

  double z0,alpha,beta;

  z0 = 0.55;
  alpha = 2.0;
  beta = 1.5;

  *dNdz = pow(z/z0,alpha) * exp(-pow(z/z0,beta));

  *dln_dNdz_dz = (alpha - pow(z/z0,beta)*beta)/z;

  return _SUCCESS_;
}

/**
 * For sources that need to be multiplied by a selection function,
 * redefine a finer time sampling in a small range.
 *
 * @param ppr            Input : pointer to precision structure
 * @param pba            Input : pointer to background structure
 * @param ppt            Input : pointer to perturbation structure
 * @param ptr            Input : pointer to transfers structure
 * @param bin            Input : redshift bin number
 * @param tau0_minus_tau Output: values of (tau0-tau) at which the source is sampled
 * @param tau_size       Input : size of previous array
 * @return the error status
 */

int transfer_selection_sampling(
                                struct precision * ppr,
                                struct background * pba,
                                struct perturbs * ppt,
                                struct transfers * ptr,
                                int bin,
                                double * tau0_minus_tau,
                                int tau_size) {

  /* running index on time */
  int index_tau;

  /* minimum, mean and maximum values of time in the newly sampled interval */
  double tau_min,tau_mean,tau_max;

  /* time interval for this bin */
  class_call(transfer_selection_times(ppr, pba, ppt, ptr, bin, &tau_min, &tau_mean, &tau_max),
             ptr->error_message,
             ptr->error_message);

  /* case selection == dirac */
  if (tau_min == tau_max) {
    class_test(tau_size !=1,
               ptr->error_message,
               "for a Dirac selection function tau_size should be 1, not %d",tau_size);
    tau0_minus_tau[0] = pba->conformal_age - tau_mean;
  }
  /* for other cases (gaussian, tophat...)
     define new sampled values of (tau0-tau) with even spacing */
  else {
    for (index_tau=0; index_tau<tau_size; index_tau++) {
      tau0_minus_tau[index_tau]=pba->conformal_age-tau_min-((double)index_tau)/((double)tau_size-1.)*(tau_max-tau_min);
    }
  }

  return _SUCCESS_;
}

/**
 * For lensing sources that need to be convolved with a selection
 * function, redefine the sampling within the range extending from the
 * tau_min of the selection function up to tau0.
 *
 * @param ppr            Input : pointer to precision structure
 * @param pba            Input : pointer to background structure
 * @param ppt            Input : pointer to perturbation structure
 * @param ptr            Input : pointer to transfers structure
 * @param bin            Input : redshift bin number
 * @param tau0           Input : time today
 * @param tau0_minus_tau Output: values of (tau0-tau) at which the source is sampled
 * @param tau_size       Input : size of previous array
 * @return the error status
 */

int transfer_lensing_sampling(
                              struct precision * ppr,
                              struct background * pba,
                              struct perturbs * ppt,
                              struct transfers * ptr,
                              int bin,
                              double tau0,
                              double * tau0_minus_tau,
                              int tau_size) {

  /* running index on time */
  int index_tau;

  /* minimum, mean and maximum values of time in the newly sampled interval */
  double tau_min,tau_mean,tau_max;

  /* time interval for this bin */
  class_call(transfer_selection_times(ppr, pba, ppt, ptr, bin, &tau_min, &tau_mean, &tau_max),
             ptr->error_message,
             ptr->error_message);

  for (index_tau=0; index_tau<tau_size; index_tau++) {
    //tau0_minus_tau[index_tau]=pba->conformal_age-tau_min-((double)index_tau)/((double)tau_size-1.)*(tau0-tau_min);
    tau0_minus_tau[index_tau]=((double)(tau_size-1-index_tau))/((double)(tau_size-1))*(tau0-tau_min);
  }

  return _SUCCESS_;
}

/**
 * For sources that need to be multiplied by a selection function,
 * redefine a finer time sampling in a small range, and resample the
 * perturbation sources at the new values by linear interpolation.
 *
 * @param ppr                  Input : pointer to precision structure
 * @param pba                  Input : pointer to background structure
 * @param ppt                  Input : pointer to perturbation structure
 * @param ptr                  Input : pointer to transfers structure
 * @param bin                  Input : redshift bin number
 * @param tau0_minus_tau       Input : values of (tau0-tau) at which the source is sampled
 * @param tau_size             Input : size of previous array
 * @param index_md             Input : index of mode
 * @param tau0                 Input : time today
 * @param interpolated_sources Input : interpolated perturbation source
 * @param sources              Output: resampled transfer source
 * @return the error status
 */

int transfer_source_resample(
                             struct precision * ppr,
                             struct background * pba,
                             struct perturbs * ppt,
                             struct transfers * ptr,
                             int bin,
                             double * tau0_minus_tau,
                             int tau_size,
                             int index_md,
                             double tau0,
                             double * interpolated_sources,
                             double * sources) {

  /* running index on time */
  int index_tau;

  /* pointer to the interpolated source value at a given time */
  double * source_at_tau;

  /* allocate room for a single source value (one time, one k) */
  class_alloc(source_at_tau, sizeof(double), ptr->error_message);

  /* interpolate the sources linearly at the new time values */
  for (index_tau=0; index_tau<tau_size; index_tau++) {

    class_call(array_interpolate_two(ppt->tau_sampling,
                                     1,
                                     0,
                                     interpolated_sources,
                                     1,
                                     ppt->tau_size,
                                     tau0-tau0_minus_tau[index_tau],
                                     source_at_tau,
                                     1,
                                     ptr->error_message),
               ptr->error_message,
               ptr->error_message);

    /* copy the new value into the output sources array */
    sources[index_tau] =
      source_at_tau[0];
  }

  /* deallocate the temporary array */
  free(source_at_tau);

  return _SUCCESS_;
}

/**
 * For each selection function, compute the min, mean and max values
 * of conformal time (associated with the min, mean and max values of
 * redshift specified by the user).
 *
 * @param ppr      Input : pointer to precision structure
 * @param pba      Input : pointer to background structure
 * @param ppt      Input : pointer to perturbation structure
 * @param ptr      Input : pointer to transfers structure
 * @param bin      Input : redshift bin number
 * @param tau_min  Output: smallest time in the selection interval
 * @param tau_mean Output: time corresponding to z_mean
 * @param tau_max  Output: largest time in the selection interval
 * @return the error status
 */

int transfer_selection_times(
                             struct precision * ppr,
                             struct background * pba,
                             struct perturbs * ppt,
                             struct transfers * ptr,
                             int bin,
                             double * tau_min,
                             double * tau_mean,
                             double * tau_max) {

  /* a value of redshift */
  double z=0.;

  /* lower edge of the time interval for this bin */

  if (ppt->selection==gaussian) {
    z = ppt->selection_mean[bin]+ppt->selection_width[bin]*ppr->selection_cut_at_sigma;
  }
  if (ppt->selection==tophat) {
    z = ppt->selection_mean[bin]+(1.+ppr->selection_cut_at_sigma*ppr->selection_tophat_edge)*ppt->selection_width[bin];
  }
  if (ppt->selection==dirac) {
    z = ppt->selection_mean[bin];
  }

  class_call(background_tau_of_z(pba, z, tau_min),
             pba->error_message,
             ppt->error_message);

  /* upper edge of the time interval for this bin */

  if (ppt->selection==gaussian) {
    z = MAX(ppt->selection_mean[bin]-ppt->selection_width[bin]*ppr->selection_cut_at_sigma,0.);
  }
  if (ppt->selection==tophat) {
    z = MAX(ppt->selection_mean[bin]-(1.+ppr->selection_cut_at_sigma*ppr->selection_tophat_edge)*ppt->selection_width[bin],0.);
  }
  if (ppt->selection==dirac) {
    z = ppt->selection_mean[bin];
  }

  class_call(background_tau_of_z(pba, z, tau_max),
             pba->error_message,
             ppt->error_message);

  /* central value of the time interval for this bin */

  z = MAX(ppt->selection_mean[bin],0.);

  class_call(background_tau_of_z(pba, z, tau_mean),
             pba->error_message,
             ppt->error_message);

  return _SUCCESS_;
}

/**
 * Compute and normalise the selection function for a set of time values.
 *
 * @param ppr            Input : pointer to precision structure
 * @param pba            Input : pointer to background structure
 * @param ppt            Input : pointer to perturbation structure
 * @param ptr            Input : pointer to transfers structure
 * @param selection      Output: normalized selection function
 * @param tau0_minus_tau Input : values of (tau0-tau) at which the source is sampled
 * @param w_trapz        Input : trapezoidal weights for integration over tau
 * @param tau_size       Input : size of previous two arrays
 * @param pvecback       Input : allocated array of background values
 * @param tau0           Input : time today
 * @param bin            Input : redshift bin number
 * @return the error status
 */

int transfer_selection_compute(
                               struct precision * ppr,
                               struct background * pba,
                               struct perturbs * ppt,
                               struct transfers * ptr,
                               double * selection,
                               double * tau0_minus_tau,
                               double * w_trapz,
                               int tau_size,
                               double * pvecback,
                               double tau0,
                               int bin) {

  /* running index over time */
  int index_tau;

  /* running value of time */
  double tau;

  /* used for normalizing the selection to one */
  double norm;

  /* used for calling background_at_tau() */
  int last_index;

  /* running value of redshift */
  double z;

  if (tau_size > 1) {

    /* loop over time */
    for (index_tau = 0; index_tau < tau_size; index_tau++) {

      /* running value of time */
      tau = tau0 - tau0_minus_tau[index_tau];

      /* get background quantities at this time */
      class_call(background_at_tau(pba, tau, pba->long_info,
                                   pba->inter_normal, &last_index, pvecback),
                 pba->error_message,
                 ptr->error_message);

      /* infer redshift */
      z = pba->a_today/pvecback[pba->index_bg_a]-1.;

      /* get the corresponding dN/dz(z,bin) */
      class_call(transfer_selection_function(ppr, ppt, ptr, bin, z, &(selection[index_tau])),
                 ptr->error_message,
                 ptr->error_message);

      /* get the corresponding dN/dtau = dN/dz * dz/dtau = dN/dz * H */
      selection[index_tau] *= pvecback[pba->index_bg_H];
    }

    /* compute norm = \int W(tau) dtau */
    class_call(array_trapezoidal_integral(selection, tau_size, w_trapz, &norm, ptr->error_message),
               ptr->error_message,
               ptr->error_message);

    /* divide W by norm so that \int W(tau) dtau = 1 */
    for (index_tau = 0; index_tau < tau_size; index_tau++) {
      selection[index_tau]/=norm;
    }
  }
  /* trivial case: dirac distribution */
  else {
    selection[0] = 1.;
  }

  return _SUCCESS_;
}

/**
 * This routine computes the transfer functions \f$ \Delta_l^{X}(k) \f$
 * as a function of wavenumber k for a given mode, initial condition,
 * type and multipole l passed in input.
 *
 * For a given value of k, the transfer function is inferred from
 * the source function (passed in input in the array interpolated_sources)
 * and from Bessel functions (passed in input in the bessels structure),
 * either by convolving them along tau, or by a Limber approximation.
 * This elementary task is distributed either to transfer_integrate()
 * or to transfer_limber(). The task of this routine is mainly to
 * loop over k values, and to decide at which k_max the calculation can
 * be stopped, according to some approximation scheme designed to find a
 * compromise between execution time and precision. The approximation
 * scheme is defined by parameters in the precision structure.
 *
 * @param ptw          Input : pointer to transfer_workspace structure (allocated in transfer_init() to avoid numerous reallocations)
 * @param ppr          Input : pointer to precision structure
 * @param ppt          Input : pointer to perturbation structure
 * @param ptr          Input/output : pointer to transfers structure (result stored there)
 * @param index_q      Input : index of wavenumber
 * @param index_md     Input : index of mode
 * @param index_ic     Input : index of initial condition
 * @param index_tt     Input : index of type of transfer
 * @param index_l      Input : index of multipole
 * @param l            Input : multipole
 * @param q_max_bessel Input : maximum value of q at which Bessel functions are computed
 * @param radial_type  Input : type of radial (Bessel) functions to convolve with
 * @return the error status
 */

int transfer_compute_for_each_l(
                                struct transfer_workspace * ptw,
                                struct precision * ppr,
                                struct perturbs * ppt,
                                struct transfers * ptr,
                                int index_q,
                                int index_md,
                                int index_ic,
                                int index_tt,
                                int index_l,
                                double l,
                                double q_max_bessel,
                                radial_function_type radial_type
                                ){

  /** Summary: */

  /** - define local variables */

  /* current wavenumber value */
  double q,k;

  /* value of transfer function */
  double transfer_function;

  /* whether to use the Limber approximation */
  short use_limber;

  /* return a zero transfer function if l is above l_max */
  if (index_l >= ptr->l_size_tt[index_md][index_tt]) {

    ptr->transfer[index_md][((index_ic * ptr->tt_size[index_md] + index_tt)
                             * ptr->l_size[index_md] + index_l)
                            * ptr->q_size + index_q] = 0.;

    return _SUCCESS_;
  }

  q = ptr->q[index_q];
  k = ptr->k[index_md][index_q];

  if (ptr->transfer_verbose > 3)
    printf("Compute transfer for l=%d type=%d\n",(int)l,index_tt);

  class_call(transfer_use_limber(ppr, ppt, ptr, q_max_bessel, index_md, index_tt, q, l, &use_limber),
             ptr->error_message,
             ptr->error_message);

  if (use_limber == _TRUE_) {

    class_call(transfer_limber(ptr, ptw, index_md, index_q, l, q, radial_type,
                               &transfer_function),
               ptr->error_message,
               ptr->error_message);
  }
  else {

    class_call(transfer_integrate(ppt,
                                  ptr,
                                  ptw,
                                  index_q,
                                  index_md,
                                  index_tt,
                                  l,
                                  index_l,
                                  k,
                                  radial_type,
                                  &transfer_function),
               ptr->error_message,
               ptr->error_message);
  }

  /* store transfer function in transfer structure */
  ptr->transfer[index_md][((index_ic * ptr->tt_size[index_md] + index_tt)
                           * ptr->l_size[index_md] + index_l)
                          * ptr->q_size + index_q] = transfer_function;

  return _SUCCESS_;
}

int transfer_use_limber(
                        struct precision * ppr,
                        struct perturbs * ppt,
                        struct transfers * ptr,
                        double q_max_bessel,
                        int index_md,
                        int index_tt,
                        double q,
                        double l,
                        short * use_limber) {

  /* the criterion for choosing between integration and Limber is implemented here */

  *use_limber = _FALSE_;

  if (q>q_max_bessel) {
    *use_limber = _TRUE_;
  }
  else {

    if (_scalars_) {

      //TBC: in principle the Limber condition should be adapted to account for curvature effects

      if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (index_tt == ptr->index_tt_lcmb) && (l>ppr->l_switch_limber)) {
        *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_density, ppt->selection_num, ppt->has_nc_density) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_density])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_rsd])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_d0, ppt->selection_num, ppt->has_nc_rsd) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_d0])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_d1])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_nc_lens, ppt->selection_num, ppt->has_nc_lens) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_nc_lens])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_nc_g1, ppt->selection_num, ppt->has_nc_gr) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_nc_g1])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_nc_g2, ppt->selection_num, ppt->has_nc_gr) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_nc_g2])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_nc_g3, ppt->selection_num, ppt->has_nc_gr) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_nc_g3])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_nc_g4, ppt->selection_num, ppt->has_nc_gr) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_nc_g4])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr) &&
          (l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_nc_g5])) {
        if (ppt->selection != dirac) *use_limber = _TRUE_;
      }
      if (_index_tt_in_range_(ptr->index_tt_lensing, ppt->selection_num, ppt->has_cl_lensing_potential) &&
(l>=ppr->l_switch_limber_for_cl_density_over_z*ppt->selection_mean[index_tt-ptr->index_tt_lensing])) { *use_limber = _TRUE_; } } } return _SUCCESS_; } /** * This routine computes the transfer functions \f$ \Delta_l^{X} (k) \f$) * for each mode, initial condition, type, multipole l and wavenumber k, * by convolving the source function (passed in input in the array * interpolated_sources) with Bessel functions (passed in input in the * bessels structure). * * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfers structure * @param tau0 Input : conformal time today * @param tau_rec Input : conformal time at recombination * @param index_md Input : index of mode * @param index_tt Input : index of type * @param index_l Input : index of multipole * @param index_q Input : index of wavenumber * @param interpolated_sources Input: array of interpolated sources * @param ptw Input : pointer to transfer_workspace structure (allocated in transfer_init() to avoid numerous reallocation) * @param trsf Output: transfer function \f$ \Delta_l(k) \f$ * @return the error status */ int transfer_integrate( struct perturbs * ppt, struct transfers * ptr, struct transfer_workspace *ptw, int index_q, int index_md, int index_tt, double l, int index_l, double k, radial_function_type radial_type, double * trsf ) { /** Summary: */ /** - define local variables */ double * tau0_minus_tau = ptw->tau0_minus_tau; double * w_trapz = ptw->w_trapz; double * sources = ptw->sources; /* minimum value of \f$ (\tau0-\tau) \f$ at which \f$ j_l(k[\tau_0-\tau]) \f$ is known, given that \f$ j_l(x) \f$ is sampled above some finite value \f$ x_{\min} \f$ (below which it can be approximated by zero) */ double tau0_minus_tau_min_bessel; /* index in the source's tau list corresponding to the last point in the overlapping region between sources and bessels. Also the index of possible Bessel truncation. 
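
     (Truncation can occur either because the Bessel function vanishes
     below some finite argument x_min, or because the source itself
     vanishes at late times; the two cases are distinguished below, since
     only the former requires the end-point triangle correction applied
     after the convolution integral.)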
  */
  int index_tau_max, index_tau_max_Bessel;
  double bessel, *radial_function;
  double x_turning_point;

  /** - find the minimum value of (tau0-tau) at which \f$ j_l(k[\tau_0-\tau]) \f$
      is known, given that \f$ j_l(x) \f$ is sampled above some finite value
      \f$ x_{\min} \f$ (below which it can be approximated by zero) */

  //tau0_minus_tau_min_bessel = x_min_l/k; /* segmentation fault impossible, checked before that k != 0 */
  //printf("index_l=%d\n",index_l);

  if (ptw->sgnK==0){
    tau0_minus_tau_min_bessel = ptw->pBIS->chi_at_phimin[index_l]/k; /* segmentation fault impossible, checked before that k != 0 */
  }
  else {
    if (index_q < ptr->index_q_flat_approximation) {
      tau0_minus_tau_min_bessel = ptw->HIS.chi_at_phimin[index_l]/sqrt(ptw->sgnK*ptw->K);
    }
    else {
      tau0_minus_tau_min_bessel = ptw->pBIS->chi_at_phimin[index_l]/sqrt(ptw->sgnK*ptw->K);
      if (ptw->sgnK == 1) {
        x_turning_point = asin(sqrt(l*(l+1.))/ptr->q[index_q]*sqrt(ptw->sgnK*ptw->K));
        tau0_minus_tau_min_bessel *= x_turning_point/sqrt(l*(l+1.));
      }
      else {
        x_turning_point = asinh(sqrt(l*(l+1.))/ptr->q[index_q]*sqrt(ptw->sgnK*ptw->K));
        tau0_minus_tau_min_bessel *= x_turning_point/sqrt(l*(l+1.));
      }
    }
  }

  /** - if there is no overlap between the region in which bessels and
      sources are non-zero, return zero */
  if (tau0_minus_tau_min_bessel >= tau0_minus_tau[0]) {
    *trsf = 0.;
    return _SUCCESS_;
  }

  /** - if there is an overlap: */

  /** -> trivial case: the source is a Dirac function and is sampled at
      only one point */
  if (ptw->tau_size == 1) {

    class_call(transfer_radial_function(ptw,
                                        ppt,
                                        ptr,
                                        k,
                                        index_q,
                                        index_l,
                                        1,
                                        &bessel,
                                        radial_type),
               ptr->error_message,
               ptr->error_message);

    *trsf = sources[0] * bessel;

    return _SUCCESS_;
  }

  /** -> other cases */

  /** (a) find the index in the source's tau list corresponding to the
      last point in the overlapping region. After this step, index_tau_max
      can be as small as zero, but not negative. */
  index_tau_max = ptw->tau_size-1;
  while (tau0_minus_tau[index_tau_max] < tau0_minus_tau_min_bessel)
    index_tau_max--;

  /* set this index so we know whether a truncation of the convolution
     integral is due to the Bessel function and not to the source */
  index_tau_max_Bessel = index_tau_max;

  /** (b) the source function can vanish at large \f$ \tau \f$. Check if
      further points can be eliminated. After this step, and if we did not
      return a null transfer function, index_tau_max can be as small as
      zero, but not negative. */
  while (sources[index_tau_max] == 0.) {
    index_tau_max--;
    if (index_tau_max < 0) {
      *trsf = 0.;
      return _SUCCESS_;
    }
  }

  if (ptw->neglect_late_source == _TRUE_) {
    while (tau0_minus_tau[index_tau_max] < ptw->tau0_minus_tau_cut) {
      index_tau_max--;
      if (index_tau_max < 0) {
        *trsf = 0.;
        return _SUCCESS_;
      }
    }
  }

  /** Compute the radial function: */
  class_alloc(radial_function,sizeof(double)*(index_tau_max+1),ptr->error_message);

  class_call(transfer_radial_function(ptw,
                                      ppt,
                                      ptr,
                                      k,
                                      index_q,
                                      index_l,
                                      index_tau_max+1,
                                      radial_function,
                                      radial_type),
             ptr->error_message,
             ptr->error_message);

  /** Now we do most of the convolution integral: */
  class_call(array_trapezoidal_convolution(sources,
                                           radial_function,
                                           index_tau_max+1,
                                           w_trapz,
                                           trsf,
                                           ptr->error_message),
             ptr->error_message,
             ptr->error_message);

  /** This integral is correct for the case where no truncation has
      occurred. If it has been truncated at some index_tau_max because
      f[index_tau_max+1]==0, it is still correct. The 'mistake' in using
      the wrong weight w_trapz[index_tau_max] is exactly compensated by
      the triangle we miss.
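
      Explicitly: writing x = tau0_minus_tau, M = index_tau_max and
      f[i] = sources[i]*radial_function[i], the full-grid trapezoidal
      weight is of the form w_trapz[M] = 0.5*(x[M-1]-x[M+1]), which
      exceeds the proper end-point weight 0.5*(x[M-1]-x[M]) by
      0.5*(x[M]-x[M+1]); multiplied by f[M], this excess is exactly the
      area of the trapezoid between M and M+1 that the truncated sum
      omits, since f[M+1]=0 there.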
However, for the Bessel cut off, we must subtract the wrong triangle and add the correct triangle */ if ((index_tau_max!=(ptw->tau_size-1))&&(index_tau_max==index_tau_max_Bessel)){ //Bessel truncation *trsf -= 0.5*(tau0_minus_tau[index_tau_max+1]-tau0_minus_tau_min_bessel)* radial_function[index_tau_max]*sources[index_tau_max]; } free(radial_function); return _SUCCESS_; } /** * This routine computes the transfer functions \f$ \Delta_l^{X} (k) \f$) * for each mode, initial condition, type, multipole l and wavenumber k, * by using the Limber approximation, i.e by evaluating the source function * (passed in input in the array interpolated_sources) at a single value of * tau (the Bessel function being approximated as a Dirac distribution) * * @param ptr Input : pointer to transfers structure * @param ptw Input : pointer to transfer workspace structure * @param index_md Input : index of mode * @param index_l Input : index of multipole * @param index_q Input : index of wavenumber * @param radial_type Input : type of radial (Bessel) functions to convolve with * @param trsf Output: transfer function \f$ \Delta_l(k) \f$ * @return the error status */ int transfer_limber( struct transfers * ptr, struct transfer_workspace * ptw, int index_md, int index_q, double l, double q, radial_function_type radial_type, double * trsf ){ /** Summary: */ /** - define local variables */ /* interpolated source and its derivatives at this value */ double S, Sp, Sm; double x_limber=0.; double tau0_minus_tau_limber=0.; double IPhiFlat = 0.; if (radial_type == SCALAR_TEMPERATURE_0) { /** - get k, l and infer tau such that k(tau0-tau)=l+1/2; check that tau is in appropriate range */ if (ptw->sgnK == 0) { tau0_minus_tau_limber = (l+0.5)/q; } else if (ptw->sgnK == 1) { x_limber = asin(sqrt(l*(l+1.))/q*sqrt(ptw->K)); tau0_minus_tau_limber = x_limber/sqrt(ptw->K); } else if (ptw->sgnK == -1) { x_limber = asinh((l+0.5)/q*sqrt(-ptw->K)); tau0_minus_tau_limber = x_limber/sqrt(-ptw->K); } if ((tau0_minus_tau_limber > ptw->tau0_minus_tau[0]) || (tau0_minus_tau_limber < ptw->tau0_minus_tau[ptw->tau_size-1])) { *trsf = 0.; return _SUCCESS_; } class_call(transfer_limber_interpolate(ptr, ptw->tau0_minus_tau, ptw->sources, ptw->tau_size, tau0_minus_tau_limber, &S), ptr->error_message, ptr->error_message); /** - get transfer = source * sqrt(pi/(2l+1))/q = source*[tau0-tau] * sqrt(pi/(2l+1))/(l+1/2) */ IPhiFlat = sqrt(_PI_/(2.*l))*(1.-0.25/l+1./32./(l*l)); *trsf = IPhiFlat*S; if (ptw->sgnK == 0) { *trsf /= (l+0.5); } else { *trsf *= pow(1.-ptw->K*l*l/q/q,-1./4.)/(tau0_minus_tau_limber*q); } } else if (radial_type == SCALAR_TEMPERATURE_1) { if (((l+1.5)/q > ptw->tau0_minus_tau[0]) || ((l-0.5)/q < ptw->tau0_minus_tau[ptw->tau_size-1])) { *trsf = 0.; return _SUCCESS_; } class_call(transfer_limber_interpolate(ptr, ptw->tau0_minus_tau, ptw->sources, ptw->tau_size, (l+1.5)/q, &Sp), ptr->error_message, ptr->error_message); class_call(transfer_limber_interpolate(ptr, ptw->tau0_minus_tau, ptw->sources, ptw->tau_size, (l-0.5)/q, &Sm), ptr->error_message, ptr->error_message); *trsf = -sqrt(_PI_/(2.*l+3.))*Sp/(l+1.5) * (l+1.)/(2.*l+1) +sqrt(_PI_/(2.*l-1.))*Sm/(l-0.5) * l/(2.*l+1.); } else if (radial_type == NC_RSD) { if (((l+2.5)/q > ptw->tau0_minus_tau[0]) || ((l-1.5)/q < ptw->tau0_minus_tau[ptw->tau_size-1])) { *trsf = 0.; return _SUCCESS_; } class_call(transfer_limber_interpolate(ptr, ptw->tau0_minus_tau, ptw->sources, ptw->tau_size, (l+2.5)/q, &Sp), ptr->error_message, ptr->error_message); class_call(transfer_limber_interpolate(ptr, 
                                           ptw->tau0_minus_tau,
                                           ptw->sources,
                                           ptw->tau_size,
                                           (l-1.5)/q,
                                           &Sm),
               ptr->error_message,
               ptr->error_message);

    class_call(transfer_limber_interpolate(ptr,
                                           ptw->tau0_minus_tau,
                                           ptw->sources,
                                           ptw->tau_size,
                                           (l+0.5)/q,
                                           &S),
               ptr->error_message,
               ptr->error_message);

    *trsf = sqrt(_PI_/(2.*l+5.))*Sp/(l+2.5) * l*(l+2.)/(2.*l+1.)/(2.*l+3.)
      -sqrt(_PI_/(2.*l+1.))*S/(l+0.5) * l/(2.*l+1.)*(l/(2.*l-1.)+(l+1.)/(2.*l+3.))
      +sqrt(_PI_/(2.*l-3.))*Sm/(l-1.5) * l*(l-1.)/(2.*l+1.)/(2.*l-1.);
  }
  else {
    class_stop(ptr->error_message,
               "Limber approximation has not been coded for the radial_type of index %d\n",
               radial_type);
  }

  return _SUCCESS_;
}

int transfer_limber_interpolate(
                                struct transfers * ptr,
                                double * tau0_minus_tau,
                                double * sources,
                                int tau_size,
                                double tau0_minus_tau_limber,
                                double * S
                                ){

  int index_tau;
  double dS,ddS;

  /** - find bracketing indices. index_tau must be at least 1 (so that
      index_tau-1 is at least 0) and at most tau_size-2 (so that
      index_tau+1 is at most tau_size-1). */
  index_tau=1;
  while ((tau0_minus_tau[index_tau] > tau0_minus_tau_limber) && (index_tau<tau_size-2))
    index_tau++;

  /** - interpolate by fitting a polynomial of order two; get the source
      and its first two derivatives. Note that we are not interpolating S,
      but the product S*(tau0-tau). Indeed this product is regular in
      tau=tau0, while S alone diverges for lensing. */

  /* the case where the last of the three points is the edge (tau0=tau)
     must be treated separately, see below */
  if (index_tau < tau_size-2) {

    class_call(array_interpolate_parabola(tau0_minus_tau[index_tau-1],
                                          tau0_minus_tau[index_tau],
                                          tau0_minus_tau[index_tau+1],
                                          tau0_minus_tau_limber,
                                          sources[index_tau-1]*tau0_minus_tau[index_tau-1],
                                          sources[index_tau]*tau0_minus_tau[index_tau],
                                          sources[index_tau+1]*tau0_minus_tau[index_tau+1],
                                          S,
                                          &dS,
                                          &ddS,
                                          ptr->error_message),
               ptr->error_message,
               ptr->error_message);
  }

  /* in this case, we have stored a zero for sources[index_k*tau_size+index_tau+1].
     But we can use to a very good approximation the fact that
     S*(tau0-tau) is constant near tau=tau0, and replace
     sources[index_k*tau_size+index_tau+1]*tau0_minus_tau[index_tau+1] by
     sources[index_k*tau_size+index_tau]*tau0_minus_tau[index_tau] */
  else {

    class_call(array_interpolate_parabola(tau0_minus_tau[index_tau-1],
                                          tau0_minus_tau[index_tau],
                                          tau0_minus_tau[index_tau+1],
                                          tau0_minus_tau_limber,
                                          sources[index_tau-1]*tau0_minus_tau[index_tau-1],
                                          sources[index_tau]*tau0_minus_tau[index_tau],
                                          sources[index_tau]*tau0_minus_tau[index_tau],
                                          S,
                                          &dS,
                                          &ddS,
                                          ptr->error_message),
               ptr->error_message,
               ptr->error_message);
  }

  return _SUCCESS_;
}

/**
 * This routine computes the transfer functions \f$ \Delta_l^{X}(k) \f$
 * for each mode, initial condition, type, multipole l and wavenumber k,
 * by using the Limber approximation at order two, i.e. as a function of
 * the source function and its first two derivatives at a single value
 * of tau.
 *
 * @param tau_size       Input : size of the conformal time sampling
 * @param ptr            Input : pointer to transfers structure
 * @param index_md       Input : index of mode
 * @param index_k        Input : index of wavenumber
 * @param l              Input : multipole
 * @param k              Input : wavenumber
 * @param tau0_minus_tau Input : array of values of (tau0-tau)
 * @param sources        Input : array of source values
 * @param radial_type    Input : type of radial (Bessel) functions to convolve with
 * @param trsf           Output: transfer function \f$ \Delta_l(k) \f$
 * @return the error status
 */

int transfer_limber2(
                     int tau_size,
                     struct transfers * ptr,
                     int index_md,
                     int index_k,
                     double l,
                     double k,
                     double * tau0_minus_tau,
                     double * sources,
                     radial_function_type radial_type,
                     double * trsf
                     ){

  /** Summary: */

  /** - define local variables */

  /* conformal time at which the source must be computed */
  double tau0_minus_tau_limber;
  int index_tau;

  /* interpolated source and its derivatives */
  double S, dS, ddS;

  /** - get k, l and infer tau such that k(tau0-tau)=l+1/2; check that tau
      is in the appropriate range */

  tau0_minus_tau_limber = (l+0.5)/k; //TBC: to be updated to include curvature effects

  if ((tau0_minus_tau_limber > tau0_minus_tau[0]) ||
      (tau0_minus_tau_limber < tau0_minus_tau[tau_size-1])) {
    *trsf = 0.;
    return _SUCCESS_;
  }

  /** - find bracketing indices (start at 1 so that index_tau-1 is never
      negative, as in transfer_limber_interpolate()) */
  index_tau=1;
  while ((tau0_minus_tau[index_tau] > tau0_minus_tau_limber) && (index_tau<tau_size-2))
    index_tau++;

  /** - interpolate by fitting a polynomial of order two; get the source
      and its first two derivatives */
  class_call(array_interpolate_parabola(tau0_minus_tau[index_tau-1],
                                        tau0_minus_tau[index_tau],
                                        tau0_minus_tau[index_tau+1],
                                        tau0_minus_tau_limber,
                                        sources[index_tau-1],
                                        sources[index_tau],
                                        sources[index_tau+1],
                                        &S,
                                        &dS,
                                        &ddS,
                                        ptr->error_message),
             ptr->error_message,
             ptr->error_message);

  /** - get the transfer from the 2nd order Limber approximation (inferred
      from 0809.5112 [astro-ph]) */
  *trsf = sqrt(_PI_/(2.*l+1.))/k*((1.-3./2./(2.*l+1.)/(2.*l+1.))*S+dS/k/(2.*l+1.)-0.5*ddS/k/k);

  return _SUCCESS_;
}

int transfer_can_be_neglected(
                              struct precision * ppr,
                              struct perturbs * ppt,
                              struct transfers * ptr,
                              int index_md,
                              int index_ic,
                              int index_tt,
                              double ra_rec,
                              double k,
                              double l,
                              short * neglect) {

  *neglect = _FALSE_;

  if (_scalars_) {

    if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t0) &&
        (l < (k-ppr->transfer_neglect_delta_k_S_t0)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t1) &&
             (l < (k-ppr->transfer_neglect_delta_k_S_t1)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t2) &&
             (l <
              (k-ppr->transfer_neglect_delta_k_S_t2)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e) &&
             (l < (k-ppr->transfer_neglect_delta_k_S_e)*ra_rec))
      *neglect = _TRUE_;
  }
  else if (_vectors_) {

    if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t1) &&
        (l < (k-ppr->transfer_neglect_delta_k_V_t1)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t2) &&
             (l < (k-ppr->transfer_neglect_delta_k_V_t2)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e) &&
             (l < (k-ppr->transfer_neglect_delta_k_V_e)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_b) &&
             (l < (k-ppr->transfer_neglect_delta_k_V_b)*ra_rec))
      *neglect = _TRUE_;
  }
  else if (_tensors_) {

    if ((ppt->has_cl_cmb_temperature == _TRUE_) && (index_tt == ptr->index_tt_t2) &&
        (l < (k-ppr->transfer_neglect_delta_k_T_t2)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_e) &&
             (l < (k-ppr->transfer_neglect_delta_k_T_e)*ra_rec))
      *neglect = _TRUE_;

    else if ((ppt->has_cl_cmb_polarization == _TRUE_) && (index_tt == ptr->index_tt_b) &&
             (l < (k-ppr->transfer_neglect_delta_k_T_b)*ra_rec))
      *neglect = _TRUE_;
  }

  return _SUCCESS_;
}

int transfer_late_source_can_be_neglected(
                                          struct precision * ppr,
                                          struct perturbs * ppt,
                                          struct transfers * ptr,
                                          int index_md,
                                          int index_tt,
                                          double l,
                                          short * neglect) {

  *neglect = _FALSE_;

  if (l > ppr->transfer_neglect_late_source*ptr->angular_rescaling) {

    /* sources at late times can be neglected for the CMB, except when
       there is a LISW (late integrated Sachs-Wolfe) contribution: this
       applies to tt_t1, t2, e */

    if (_scalars_) {

      if (ppt->has_cl_cmb_temperature == _TRUE_) {
        if ((index_tt == ptr->index_tt_t1) || (index_tt == ptr->index_tt_t2))
          *neglect = _TRUE_;
      }
      if (ppt->has_cl_cmb_polarization == _TRUE_) {
        if (index_tt == ptr->index_tt_e)
          *neglect = _TRUE_;
      }
    }
    else if (_vectors_) {

      if (ppt->has_cl_cmb_temperature == _TRUE_) {
        if ((index_tt == ptr->index_tt_t1) || (index_tt == ptr->index_tt_t2))
          *neglect = _TRUE_;
      }
      if (ppt->has_cl_cmb_polarization == _TRUE_) {
        if ((index_tt == ptr->index_tt_e) || (index_tt == ptr->index_tt_b))
          *neglect = _TRUE_;
      }
    }
    else if (_tensors_) {

      if (ppt->has_cl_cmb_polarization == _TRUE_) {
        if ((index_tt == ptr->index_tt_e) || (index_tt == ptr->index_tt_b))
          *neglect = _TRUE_;
      }
    }
  }

  return _SUCCESS_;
}

int transfer_radial_function(
                             struct transfer_workspace * ptw,
                             struct perturbs * ppt,
                             struct transfers * ptr,
                             double k,
                             int index_q,
                             int index_l,
                             int x_size,
                             double * radial_function,
                             radial_function_type radial_type
                             ){

  HyperInterpStruct * pHIS;
  double *chi = ptw->chi;
  double *cscKgen = ptw->cscKgen;
  double *cotKgen = ptw->cotKgen;
  int j;
  double *Phi, *dPhi, *d2Phi, *chireverse;
  double K=0.,k2=1.0;
  double sqrt_absK_over_k;
  double absK_over_k2;
  double nu=0., chi_tp=0.;
  double factor, s0, s2, ssqrt3, si, ssqrt2, ssqrt2i;
  double l = (double)ptr->l[index_l];
  double rescale_argument;
  double rescale_amplitude;
  double * rescale_function;
  int (*interpolate_Phi)();
  int (*interpolate_dPhi)();
  int (*interpolate_Phid2Phi)();
  int (*interpolate_PhidPhi)();
  int (*interpolate_PhidPhid2Phi)();
  enum Hermite_Interpolation_Order HIorder;

  K = ptw->K;
  k2 = k*k;

  if (ptw->sgnK==0){
    /* this is the choice consistent with chi=k*(tau0-tau) and nu=1 */
    sqrt_absK_over_k = 1.0;
  }
  else {
    K=ptw->K;
    sqrt_absK_over_k = sqrt(ptw->sgnK*K)/k;
  }

  absK_over_k2
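    /* i.e. |K|/k^2 (equal to 1 in the flat case, consistent with the
       convention chosen just above) */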
=sqrt_absK_over_k*sqrt_absK_over_k; class_alloc(Phi,sizeof(double)*x_size,ptr->error_message); class_alloc(dPhi,sizeof(double)*x_size,ptr->error_message); class_alloc(d2Phi,sizeof(double)*x_size,ptr->error_message); class_alloc(chireverse,sizeof(double)*x_size,ptr->error_message); class_alloc(rescale_function,sizeof(double)*x_size,ptr->error_message); if (ptw->sgnK == 0) { pHIS = ptw->pBIS; rescale_argument = 1.; rescale_amplitude = 1.; HIorder = HERMITE4; } else if (index_q < ptr->index_q_flat_approximation) { pHIS = &(ptw->HIS); rescale_argument = 1.; rescale_amplitude = 1.; HIorder = HERMITE6; } else { pHIS = ptw->pBIS; if (ptw->sgnK == 1){ nu = ptr->q[index_q]/sqrt(K); chi_tp = asin(sqrt(ptr->l[index_l]*(ptr->l[index_l]+1.))/nu); } else{ nu = ptr->q[index_q]/sqrt(-K); chi_tp = asinh(sqrt(ptr->l[index_l]*(ptr->l[index_l]+1.))/nu); } rescale_argument = sqrt(ptr->l[index_l]*(ptr->l[index_l]+1.))/chi_tp; rescale_amplitude = pow(1.-K*ptr->l[index_l]*(ptr->l[index_l]+1.)/ptr->q[index_q]/ptr->q[index_q],-1./12.); HIorder = HERMITE4; } switch (HIorder){ case HERMITE3: interpolate_Phi = hyperspherical_Hermite3_interpolation_vector_Phi; interpolate_dPhi = hyperspherical_Hermite3_interpolation_vector_dPhi; interpolate_PhidPhi = hyperspherical_Hermite3_interpolation_vector_PhidPhi; interpolate_Phid2Phi = hyperspherical_Hermite3_interpolation_vector_Phid2Phi; interpolate_PhidPhid2Phi = hyperspherical_Hermite3_interpolation_vector_PhidPhid2Phi; break; case HERMITE4: interpolate_Phi = hyperspherical_Hermite4_interpolation_vector_Phi; interpolate_dPhi = hyperspherical_Hermite4_interpolation_vector_dPhi; interpolate_PhidPhi = hyperspherical_Hermite4_interpolation_vector_PhidPhi; interpolate_Phid2Phi = hyperspherical_Hermite4_interpolation_vector_Phid2Phi; interpolate_PhidPhid2Phi = hyperspherical_Hermite4_interpolation_vector_PhidPhid2Phi; break; case HERMITE6: interpolate_Phi = hyperspherical_Hermite6_interpolation_vector_Phi; interpolate_dPhi = hyperspherical_Hermite6_interpolation_vector_dPhi; interpolate_PhidPhi = hyperspherical_Hermite6_interpolation_vector_PhidPhi; interpolate_Phid2Phi = hyperspherical_Hermite6_interpolation_vector_Phid2Phi; interpolate_PhidPhid2Phi = hyperspherical_Hermite6_interpolation_vector_PhidPhid2Phi; break; } //Reverse chi for (j=0; j<x_size; j++) { chireverse[j] = chi[x_size-1-j]*rescale_argument; if (rescale_amplitude == 1.) { rescale_function[j] = 1.; } else { if (ptw->sgnK == 1) { rescale_function[j] = MIN( rescale_amplitude * (1 + 0.34 * atan(ptr->l[index_l]/nu) * (chireverse[j]/rescale_argument-chi_tp) + 2.00 * pow(atan(ptr->l[index_l]/nu) * (chireverse[j]/rescale_argument-chi_tp),2)), chireverse[j]/rescale_argument/sin(chireverse[j]/rescale_argument) ); } else { rescale_function[j] = MAX( rescale_amplitude * (1 - 0.38 * atan(ptr->l[index_l]/nu) * (chireverse[j]/rescale_argument-chi_tp) + 0.40 * pow(atan(ptr->l[index_l]/nu) * (chireverse[j]/rescale_argument-chi_tp),2)), chireverse[j]/rescale_argument/sinh(chireverse[j]/rescale_argument) ); } } } /* class_test(pHIS->x[0] > chireverse[0], ptr->error_message, "Bessels need to be interpolated at %e, outside the range in which they have been computed (>%e). Decrease their x_min.", chireverse[0], pHIS->x[0]); */ class_test((pHIS->x[pHIS->x_size-1] < chireverse[x_size-1]) && (ptw->sgnK != 1), ptr->error_message, "Bessels need to be interpolated at %e, outside the range in which they have been computed (<%e). 
Increase their x_max.", chireverse[x_size-1], pHIS->x[pHIS->x_size-1] ); switch (radial_type){ case SCALAR_TEMPERATURE_0: class_call(interpolate_Phi(pHIS, x_size, index_l, chireverse, Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, NULL); for (j=0; j<x_size; j++) radial_function[x_size-1-j] = Phi[j]*rescale_function[j]; break; case SCALAR_TEMPERATURE_1: class_call(interpolate_dPhi(pHIS, x_size, index_l, chireverse, dPhi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, NULL, dPhi, NULL); for (j=0; j<x_size; j++) radial_function[x_size-1-j] = sqrt_absK_over_k*dPhi[j]*rescale_argument*rescale_function[j]; break; case SCALAR_TEMPERATURE_2: class_call(interpolate_Phid2Phi(pHIS, x_size, index_l, chireverse, Phi, d2Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, d2Phi); s2 = sqrt(1.0-3.0*K/k2); factor = 1.0/(2.0*s2); for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*(3*absK_over_k2*d2Phi[j]*rescale_argument*rescale_argument+Phi[j])*rescale_function[j]; break; case SCALAR_POLARISATION_E: class_call(interpolate_Phi(pHIS, x_size, index_l, chireverse, Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, NULL); s2 = sqrt(1.0-3.0*K/k2); factor = sqrt(3.0/8.0*(l+2.0)*(l+1.0)*l*(l-1.0))/s2; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*cscKgen[x_size-1-j]*cscKgen[x_size-1-j]*Phi[j]*rescale_function[j]; break; case VECTOR_TEMPERATURE_1: class_call(interpolate_Phi(pHIS, x_size, index_l, chireverse, Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, NULL); s0 = sqrt(1.0+K/k2); factor = sqrt(0.5*l*(l+1))/s0; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*cscKgen[x_size-1-j]*Phi[j]*rescale_function[j]; break; case VECTOR_TEMPERATURE_2: class_call(interpolate_PhidPhi(pHIS, x_size, index_l, chireverse, Phi, dPhi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, dPhi, NULL); s0 = sqrt(1.0+K/k2); ssqrt3 = sqrt(1.0-2.0*K/k2); factor = sqrt(1.5*l*(l+1))/s0/ssqrt3; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*cscKgen[x_size-1-j]*(sqrt_absK_over_k*dPhi[j]*rescale_argument-cotKgen[j]*Phi[j])*rescale_function[j]; break; case VECTOR_POLARISATION_E: class_call(interpolate_PhidPhi(pHIS, x_size, index_l, chireverse, Phi, dPhi, ptr->error_message), ptr->error_message, ptr->error_message); // hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, dPhi, NULL); s0 = sqrt(1.0+K/k2); ssqrt3 = sqrt(1.0-2.0*K/k2); factor = 0.5*sqrt((l-1.0)*(l+2.0))/s0/ssqrt3; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*cscKgen[x_size-1-j]*(cotKgen[j]*Phi[j]+sqrt_absK_over_k*dPhi[j]*rescale_argument)*rescale_function[j]; break; case VECTOR_POLARISATION_B: class_call(interpolate_Phi(pHIS, x_size, index_l, chireverse, Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, NULL); s0 = sqrt(1.0+K/k2); ssqrt3 = 
sqrt(1.0-2.0*K/k2); si = sqrt(1.0+2.0*K/k2); factor = 0.5*sqrt((l-1.0)*(l+2.0))*si/s0/ssqrt3; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*cscKgen[x_size-1-j]*Phi[j]*rescale_function[j]; break; case TENSOR_TEMPERATURE_2: class_call(interpolate_Phi(pHIS, x_size, index_l, chireverse, Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, NULL); ssqrt2 = sqrt(1.0-1.0*K/k2); si = sqrt(1.0+2.0*K/k2); factor = sqrt(3.0/8.0*(l+2.0)*(l+1.0)*l*(l-1.0))/si/ssqrt2; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*cscKgen[x_size-1-j]*cscKgen[x_size-1-j]*Phi[j]*rescale_function[j]; break; case TENSOR_POLARISATION_E: class_call(interpolate_PhidPhid2Phi(pHIS, x_size, index_l, chireverse, Phi, dPhi, d2Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, NULL); ssqrt2 = sqrt(1.0-1.0*K/k2); si = sqrt(1.0+2.0*K/k2); factor = 0.25/si/ssqrt2; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*(absK_over_k2*d2Phi[j]*rescale_argument*rescale_argument +4.0*cotKgen[x_size-1-j]*sqrt_absK_over_k*dPhi[j]*rescale_argument -(1.0+4*K/k2-2.0*cotKgen[x_size-1-j]*cotKgen[x_size-1-j])*Phi[j])*rescale_function[j]; break; case TENSOR_POLARISATION_B: class_call(interpolate_PhidPhi(pHIS, x_size, index_l, chireverse, Phi, dPhi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, dPhi, NULL); ssqrt2i = sqrt(1.0+3.0*K/k2); ssqrt2 = sqrt(1.0-1.0*K/k2); si = sqrt(1.0+2.0*K/k2); factor = 0.5*ssqrt2i/ssqrt2/si; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*(sqrt_absK_over_k*dPhi[j]*rescale_argument+2.0*cotKgen[x_size-1-j]*Phi[j])*rescale_function[j]; break; case NC_RSD: class_call(interpolate_Phid2Phi(pHIS, x_size, index_l, chireverse, Phi, d2Phi, ptr->error_message), ptr->error_message, ptr->error_message); //hyperspherical_Hermite_interpolation_vector(pHIS, x_size, index_l, chireverse, Phi, NULL, d2Phi); //s2 = sqrt(1.0-3.0*K/k2); factor = 1.0; for (j=0; j<x_size; j++) radial_function[x_size-1-j] = factor*d2Phi[j]*rescale_argument*rescale_argument*rescale_function[j]; break; } free(Phi); free(dPhi); free(d2Phi); free(chireverse); free(rescale_function); return _SUCCESS_; } int transfer_select_radial_function( struct perturbs * ppt, struct transfers * ptr, int index_md, int index_tt, radial_function_type * radial_type ) { /* generic case leading to generic bessel function (it applies also to all nonCMB types: lcmb, density, lensing) */ *radial_type = SCALAR_TEMPERATURE_0; /* other specific cases */ if (_scalars_) { if (ppt->has_cl_cmb_temperature == _TRUE_) { if (index_tt == ptr->index_tt_t0) { *radial_type = SCALAR_TEMPERATURE_0; } if (index_tt == ptr->index_tt_t1) { *radial_type = SCALAR_TEMPERATURE_1; } if (index_tt == ptr->index_tt_t2) { *radial_type = SCALAR_TEMPERATURE_2; } } if (ppt->has_cl_cmb_polarization == _TRUE_) { if (index_tt == ptr->index_tt_e) { *radial_type = SCALAR_POLARISATION_E; } } if (_index_tt_in_range_(ptr->index_tt_d1, ppt->selection_num, ppt->has_nc_rsd)) *radial_type = SCALAR_TEMPERATURE_1; if (_index_tt_in_range_(ptr->index_tt_rsd, ppt->selection_num, ppt->has_nc_rsd)) *radial_type = NC_RSD; if (_index_tt_in_range_(ptr->index_tt_nc_g5, ppt->selection_num, ppt->has_nc_gr)) *radial_type = SCALAR_TEMPERATURE_1; } if (_vectors_) { if 
(ppt->has_cl_cmb_temperature == _TRUE_) { if (index_tt == ptr->index_tt_t1) { *radial_type = VECTOR_TEMPERATURE_1; } if (index_tt == ptr->index_tt_t2) { *radial_type = VECTOR_TEMPERATURE_2; } } if (ppt->has_cl_cmb_polarization == _TRUE_) { if (index_tt == ptr->index_tt_e) { *radial_type = VECTOR_POLARISATION_E; } if (index_tt == ptr->index_tt_b) { *radial_type = VECTOR_POLARISATION_B; } } } if (_tensors_) { if (ppt->has_cl_cmb_temperature == _TRUE_) { if (index_tt == ptr->index_tt_t2) { *radial_type = TENSOR_TEMPERATURE_2; } } if (ppt->has_cl_cmb_polarization == _TRUE_) { if (index_tt == ptr->index_tt_e) { *radial_type = TENSOR_POLARISATION_E; } if (index_tt == ptr->index_tt_b) { *radial_type = TENSOR_POLARISATION_B; } } } return _SUCCESS_; } /* for reading global selection function (ie the one multiplying the selection function of each bin) */ int transfer_global_selection_read( struct transfers * ptr ) { /* for reading selection function */ FILE * input_file; int row,status; double tmp1,tmp2; ptr->nz_size = 0; if (ptr->has_nz_file == _TRUE_) { input_file = fopen(ptr->nz_file_name,"r"); class_test(input_file == NULL, ptr->error_message, "Could not open file %s!",ptr->nz_file_name); /* Find size of table */ for (row=0,status=2; status==2; row++){ status = fscanf(input_file,"%lf %lf",&tmp1,&tmp2); } rewind(input_file); ptr->nz_size = row-1; /* Allocate room for interpolation table */ class_alloc(ptr->nz_z,sizeof(double)*ptr->nz_size,ptr->error_message); class_alloc(ptr->nz_nz,sizeof(double)*ptr->nz_size,ptr->error_message); class_alloc(ptr->nz_ddnz,sizeof(double)*ptr->nz_size,ptr->error_message); for (row=0; row<ptr->nz_size; row++){ status = fscanf(input_file,"%lf %lf", &ptr->nz_z[row],&ptr->nz_nz[row]); //printf("%d: (z,dNdz) = (%g,%g)\n",row,ptr->nz_z[row],ptr->nz_nz[row]); } fclose(input_file); /* Call spline interpolation: */ class_call(array_spline_table_lines(ptr->nz_z, ptr->nz_size, ptr->nz_nz, 1, ptr->nz_ddnz, _SPLINE_EST_DERIV_, ptr->error_message), ptr->error_message, ptr->error_message); } ptr->nz_evo_size = 0; if (ptr->has_nz_evo_file == _TRUE_) { input_file = fopen(ptr->nz_evo_file_name,"r"); class_test(input_file == NULL, ptr->error_message, "Could not open file %s!",ptr->nz_evo_file_name); /* Find size of table */ for (row=0,status=2; status==2; row++){ status = fscanf(input_file,"%lf %lf",&tmp1,&tmp2); } rewind(input_file); ptr->nz_evo_size = row-1; /* Allocate room for interpolation table */ class_alloc(ptr->nz_evo_z,sizeof(double)*ptr->nz_evo_size,ptr->error_message); class_alloc(ptr->nz_evo_nz,sizeof(double)*ptr->nz_evo_size,ptr->error_message); class_alloc(ptr->nz_evo_dlog_nz,sizeof(double)*ptr->nz_evo_size,ptr->error_message); class_alloc(ptr->nz_evo_dd_dlog_nz,sizeof(double)*ptr->nz_evo_size,ptr->error_message); for (row=0; row<ptr->nz_evo_size; row++){ status = fscanf(input_file,"%lf %lf", &ptr->nz_evo_z[row],&ptr->nz_evo_nz[row]); } fclose(input_file); /* infer dlog(dN/dz)/dz from dN/dz */ ptr->nz_evo_dlog_nz[0] = (ptr->nz_evo_nz[1]-ptr->nz_evo_nz[0]) /(ptr->nz_evo_z[1]-ptr->nz_evo_z[0]); for (row=1; row<ptr->nz_evo_size-1; row++){ ptr->nz_evo_dlog_nz[row] = (ptr->nz_evo_nz[row+1]-ptr->nz_evo_nz[row-1]) /(ptr->nz_evo_z[row+1]-ptr->nz_evo_z[row-1]); } ptr->nz_evo_dlog_nz[ptr->nz_evo_size-1] = (ptr->nz_evo_nz[ptr->nz_evo_size-1]-ptr->nz_evo_nz[ptr->nz_evo_size-2]) /(ptr->nz_evo_z[ptr->nz_evo_size-1]-ptr->nz_evo_z[ptr->nz_evo_size-2]); /* to test that the file is read: for (row=0; row<ptr->nz_evo_size; row++){ fprintf(stdout,"%d: (z,dNdz,dlndNdzdz) = 
(%g,%g,%g)\n",row,ptr->nz_evo_z[row],ptr->nz_evo_nz[row],ptr->nz_evo_dlog_nz[row]); } */ /* Call spline interpolation: */ class_call(array_spline_table_lines(ptr->nz_evo_z, ptr->nz_evo_size, ptr->nz_evo_dlog_nz, 1, ptr->nz_evo_dd_dlog_nz, _SPLINE_EST_DERIV_, ptr->error_message), ptr->error_message, ptr->error_message); } return _SUCCESS_; }; int transfer_workspace_init( struct transfers * ptr, struct precision * ppr, struct transfer_workspace **ptw, int perturb_tau_size, int tau_size_max, double K, int sgnK, double tau0_minus_tau_cut, HyperInterpStruct * pBIS){ class_calloc(*ptw,1,sizeof(struct transfer_workspace),ptr->error_message); (*ptw)->tau_size_max = tau_size_max; (*ptw)->l_size = ptr->l_size_max; (*ptw)->HIS_allocated=_FALSE_; (*ptw)->pBIS = pBIS; (*ptw)->K = K; (*ptw)->sgnK = sgnK; (*ptw)->tau0_minus_tau_cut = tau0_minus_tau_cut; (*ptw)->neglect_late_source = _FALSE_; class_alloc((*ptw)->interpolated_sources,perturb_tau_size*sizeof(double),ptr->error_message); class_alloc((*ptw)->sources,tau_size_max*sizeof(double),ptr->error_message); class_alloc((*ptw)->tau0_minus_tau,tau_size_max*sizeof(double),ptr->error_message); class_alloc((*ptw)->w_trapz,tau_size_max*sizeof(double),ptr->error_message); class_alloc((*ptw)->chi,tau_size_max*sizeof(double),ptr->error_message); class_alloc((*ptw)->cscKgen,tau_size_max*sizeof(double),ptr->error_message); class_alloc((*ptw)->cotKgen,tau_size_max*sizeof(double),ptr->error_message); return _SUCCESS_; } int transfer_workspace_free( struct transfers * ptr, struct transfer_workspace *ptw ) { if (ptw->HIS_allocated==_TRUE_){ //Free HIS structure: class_call(hyperspherical_HIS_free(&(ptw->HIS),ptr->error_message), ptr->error_message, ptr->error_message); } free(ptw->interpolated_sources); free(ptw->sources); free(ptw->tau0_minus_tau); free(ptw->w_trapz); free(ptw->chi); free(ptw->cscKgen); free(ptw->cotKgen); free(ptw); return _SUCCESS_; } int transfer_update_HIS( struct precision * ppr, struct transfers * ptr, struct transfer_workspace * ptw, int index_q, double tau0 ) { double nu,new_nu; int int_nu; double xmin, xmax, sampling, phiminabs, xtol; double sqrt_absK; int l_size_max; int index_l_left,index_l_right; if (ptw->HIS_allocated == _TRUE_) { class_call(hyperspherical_HIS_free(&(ptw->HIS),ptr->error_message), ptr->error_message, ptr->error_message); ptw->HIS_allocated = _FALSE_; } if ((ptw->sgnK!=0) && (index_q < ptr->index_q_flat_approximation)) { xmin = ppr->hyper_x_min; sqrt_absK = sqrt(ptw->sgnK*ptw->K); xmax = sqrt_absK*tau0; nu = ptr->q[index_q]/sqrt_absK; if (ptw->sgnK == 1) { xmax = MIN(xmax,_PI_/2.0-ppr->hyper_x_min); //We only need solution on [0;pi/2] int_nu = (int)(nu+0.2); new_nu = (double)int_nu; class_test(nu-new_nu > 1.e-6, ptr->error_message, "problem in q list definition in closed case for index_q=%d, nu=%e, nu-int(nu)=%e",index_q,nu,nu-new_nu); nu = new_nu; } if (nu > ppr->hyper_nu_sampling_step) sampling = ppr->hyper_sampling_curved_high_nu; else sampling = ppr->hyper_sampling_curved_low_nu; /* find the highest value of l such that x_nonzero < xmax = sqrt(|K|) tau0. That will be l_max. 
*/ l_size_max = ptr->l_size_max; if (ptw->sgnK == 1) while ((double)ptr->l[l_size_max-1] >= nu) l_size_max--; if (ptw->sgnK == -1){ xtol = ppr->hyper_x_tol; phiminabs = ppr->hyper_phi_min_abs; /** First try to find lmax using fast approximation: */ index_l_left=0; index_l_right=l_size_max-1; class_call(transfer_get_lmax(hyperspherical_get_xmin_from_approx, ptw->sgnK, nu, ptr->l, l_size_max, phiminabs, xmax, xtol, &index_l_left, &index_l_right, ptr->error_message), ptr->error_message, ptr->error_message); /** Now use WKB approximation to eventually modify borders: */ class_call(transfer_get_lmax(hyperspherical_get_xmin_from_Airy, ptw->sgnK, nu, ptr->l, l_size_max, phiminabs, xmax, xtol, &index_l_left, &index_l_right, ptr->error_message), ptr->error_message, ptr->error_message); l_size_max = index_l_right+1; } class_test(nu <= 0., ptr->error_message, "nu=%e when index_q=%d, q=%e, K=%e, sqrt(|K|)=%e; instead nu should always be strictly positive", nu,index_q,ptr->q[index_q],ptw->K,sqrt_absK); class_call(hyperspherical_HIS_create(ptw->sgnK, nu, l_size_max, ptr->l, xmin, xmax, sampling, ptr->l[l_size_max-1]+1, ppr->hyper_phi_min_abs, &(ptw->HIS), ptr->error_message), ptr->error_message, ptr->error_message); ptw->HIS_allocated = _TRUE_; } return _SUCCESS_; } int transfer_get_lmax(int (*get_xmin_generic)(int sgnK, int l, double nu, double xtol, double phiminabs, double *x_nonzero, int *fevals), int sgnK, double nu, int *lvec, int lsize, double phiminabs, double xmax, double xtol, int *index_l_left, int *index_l_right, ErrorMsg error_message){ double x_nonzero; int fevals=0, index_l_mid; int multiplier; int right_boundary_checked = _FALSE_; int hil=0,hir=0,bini=0; class_call(get_xmin_generic(sgnK, lvec[0], nu, xtol, phiminabs, &x_nonzero, &fevals), error_message, error_message); if (x_nonzero >= xmax){ //printf("None relevant\n"); //x at left boundary is already larger than xmax. *index_l_right = MAX(lsize-1,1); return _SUCCESS_; } class_call(get_xmin_generic(sgnK, lvec[lsize-1], nu, xtol, phiminabs, &x_nonzero, &fevals), error_message, error_message); if (x_nonzero < xmax){ //All Bessels are relevant //printf("All relevant\n"); *index_l_left = MAX(0,(lsize-2)); return _SUCCESS_; } /** Hunt for left boundary: */ for (multiplier=1; ;multiplier *= 5){ hil++; class_call(get_xmin_generic(sgnK, lvec[*index_l_left], nu, xtol, phiminabs, &x_nonzero, &fevals), error_message, error_message); //printf("Hunt left, iter = %d, x_nonzero=%g\n",hil,x_nonzero); if (x_nonzero <= xmax){ //Boundary found break; } else{ //We can use current index_l_left as index_l_right: *index_l_right = *index_l_left; right_boundary_checked = _TRUE_; } //Update index_l_left: *index_l_left = (*index_l_left)-multiplier; if (*index_l_left<=0){ *index_l_left = 0; break; } } /** If not found, hunt for right boundary: */ if (right_boundary_checked == _FALSE_){ for (multiplier=1; ;multiplier *= 5){ hir++; //printf("right iteration %d,index_l_right:%d\n",hir,*index_l_right); class_call(get_xmin_generic(sgnK, lvec[*index_l_right], nu, xtol, phiminabs, &x_nonzero, &fevals), error_message, error_message); if (x_nonzero >= xmax){ //Boundary found break; } else{ //We can use current index_l_right as index_l_left: *index_l_left = *index_l_right; } //Update index_l_right: *index_l_right = (*index_l_right)+multiplier; if (*index_l_right>=(lsize-1)){ *index_l_right = lsize-1; break; } } } // int fevalshunt=fevals; fevals=0; //Do binary search // printf("Do binary search in get_lmax. 
\n"); //printf("Region: [%d, %d]\n",*index_l_left,*index_l_right); while (((*index_l_right) - (*index_l_left)) > 1) { bini++; index_l_mid= (int)(0.5*((*index_l_right)+(*index_l_left))); //printf("left:%d, mid=%d, right=%d\n",*index_l_left,index_l_mid,*index_l_right); class_call(get_xmin_generic(sgnK, lvec[index_l_mid], nu, xtol, phiminabs, &x_nonzero, &fevals), error_message, error_message); if (x_nonzero < xmax) *index_l_left=index_l_mid; else *index_l_right=index_l_mid; } //printf("Done\n"); /** printf("Hunt left iter=%d, hunt right iter=%d (fevals: %d). For binary seach: %d (fevals: %d)\n", hil,hir,fevalshunt,bini,fevals); */ return _SUCCESS_; }
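/* Illustration (not part of CLASS): transfer_get_lmax above brackets the
   largest relevant l with a geometrically growing "hunt" step, then bisects
   inside the bracket. The sketch below shows the same pattern on a plain
   sorted array; the names and the predicate are hypothetical stand-ins for
   the hyperspherical_get_xmin_* calls. */
#include <stdio.h>

/* stand-in for the x_nonzero >= xmax test done via get_xmin_generic */
static int past_threshold(const double *x, int i, double xmax) {
  return x[i] >= xmax;
}

/* largest index i with x[i] < xmax for an increasing array x; -1 if none */
static int find_last_below(const double *x, int n, double xmax) {
  if (past_threshold(x, 0, xmax)) return -1;         /* none relevant */
  if (!past_threshold(x, n - 1, xmax)) return n - 1; /* all relevant */
  int left = 0, right = n - 1; /* invariant: x[left] < xmax <= x[right] */
  int step = 1;
  while (left + step < right && !past_threshold(x, left + step, xmax)) {
    left += step; /* hunt phase: grow the step, like the multiplier *= 5 above */
    step *= 5;
  }
  while (right - left > 1) { /* bisection phase, as in the while loop above */
    int mid = (left + right) / 2;
    if (past_threshold(x, mid, xmax)) right = mid;
    else left = mid;
  }
  return left;
}

int main(void) {
  double x[] = {0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0};
  printf("last index below 10.0: %d\n", find_last_below(x, 7, 10.0)); /* 4 */
  return 0;
}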
convolution_3x3_pack1to8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); __m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); __m256 _k00_1 = _mm256_loadu_ps(k1); __m256 _k01_1 = _mm256_loadu_ps(k1 + 8); __m256 _k02_1 = _mm256_loadu_ps(k1 + 16); __m256 _k10_1 = _mm256_loadu_ps(k1 + 24); __m256 _k11_1 = _mm256_loadu_ps(k1 + 32); __m256 _k12_1 = _mm256_loadu_ps(k1 + 40); __m256 _k20_1 = _mm256_loadu_ps(k1 + 48); __m256 _k21_1 = _mm256_loadu_ps(k1 + 56); __m256 _k22_1 = _mm256_loadu_ps(k1 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_fmadd_ps(_r02, 
_k01_1, _sum10); _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _sum01 = _mm256_fmadd_ps(_r02, _k00_0, _sum01); _sum01 = _mm256_fmadd_ps(_r03, _k01_0, _sum01); _sum01 = _mm256_fmadd_ps(_r04, _k02_0, _sum01); _sum01 = _mm256_fmadd_ps(_r12, _k10_0, _sum01); _sum01 = _mm256_fmadd_ps(_r13, _k11_0, _sum01); _sum01 = _mm256_fmadd_ps(_r14, _k12_0, _sum01); _sum01 = _mm256_fmadd_ps(_r22, _k20_0, _sum01); _sum01 = _mm256_fmadd_ps(_r23, _k21_0, _sum01); _sum01 = _mm256_fmadd_ps(_r24, _k22_0, _sum01); _sum11 = _mm256_fmadd_ps(_r02, _k00_1, _sum11); _sum11 = _mm256_fmadd_ps(_r03, _k01_1, _sum11); _sum11 = _mm256_fmadd_ps(_r04, _k02_1, _sum11); _sum11 = _mm256_fmadd_ps(_r12, _k10_1, _sum11); _sum11 = _mm256_fmadd_ps(_r13, _k11_1, _sum11); _sum11 = _mm256_fmadd_ps(_r14, _k12_1, _sum11); _sum11 = _mm256_fmadd_ps(_r22, _k20_1, _sum11); _sum11 = _mm256_fmadd_ps(_r23, _k21_1, _sum11); _sum11 = _mm256_fmadd_ps(_r24, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum02 = _mm256_fmadd_ps(_r03, _k00_0, _sum02); _sum02 = _mm256_fmadd_ps(_r04, _k01_0, _sum02); _sum02 = _mm256_fmadd_ps(_r05, _k02_0, _sum02); _sum02 = _mm256_fmadd_ps(_r13, _k10_0, _sum02); _sum02 = _mm256_fmadd_ps(_r14, _k11_0, _sum02); _sum02 = _mm256_fmadd_ps(_r15, _k12_0, _sum02); _sum02 = _mm256_fmadd_ps(_r23, _k20_0, _sum02); _sum02 = _mm256_fmadd_ps(_r24, _k21_0, _sum02); _sum02 = _mm256_fmadd_ps(_r25, _k22_0, _sum02); _sum12 = _mm256_fmadd_ps(_r03, _k00_1, _sum12); _sum12 = _mm256_fmadd_ps(_r04, _k01_1, _sum12); _sum12 = _mm256_fmadd_ps(_r05, _k02_1, _sum12); _sum12 = _mm256_fmadd_ps(_r13, _k10_1, _sum12); _sum12 = _mm256_fmadd_ps(_r14, _k11_1, _sum12); _sum12 = _mm256_fmadd_ps(_r15, _k12_1, _sum12); _sum12 = _mm256_fmadd_ps(_r23, _k20_1, _sum12); _sum12 = _mm256_fmadd_ps(_r24, _k21_1, _sum12); _sum12 = _mm256_fmadd_ps(_r25, _k22_1, _sum12); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum03 = _mm256_fmadd_ps(_r04, _k00_0, _sum03); _sum03 = _mm256_fmadd_ps(_r05, _k01_0, _sum03); _sum03 = _mm256_fmadd_ps(_r06, _k02_0, _sum03); _sum03 = _mm256_fmadd_ps(_r14, _k10_0, _sum03); _sum03 = _mm256_fmadd_ps(_r15, _k11_0, _sum03); _sum03 = _mm256_fmadd_ps(_r16, _k12_0, _sum03); _sum03 = _mm256_fmadd_ps(_r24, _k20_0, _sum03); _sum03 = _mm256_fmadd_ps(_r25, _k21_0, _sum03); _sum03 = _mm256_fmadd_ps(_r26, _k22_0, _sum03); _sum13 = _mm256_fmadd_ps(_r04, _k00_1, _sum13); _sum13 = _mm256_fmadd_ps(_r05, _k01_1, _sum13); _sum13 = _mm256_fmadd_ps(_r06, _k02_1, _sum13); 
_sum13 = _mm256_fmadd_ps(_r14, _k10_1, _sum13); _sum13 = _mm256_fmadd_ps(_r15, _k11_1, _sum13); _sum13 = _mm256_fmadd_ps(_r16, _k12_1, _sum13); _sum13 = _mm256_fmadd_ps(_r24, _k20_1, _sum13); _sum13 = _mm256_fmadd_ps(_r25, _k21_1, _sum13); _sum13 = _mm256_fmadd_ps(_r26, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); r0 += 4; r1 += 4; r2 += 4; outptr0 += 32; outptr1 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _sum01 = _mm256_fmadd_ps(_r02, _k00_0, _sum01); _sum01 = _mm256_fmadd_ps(_r03, _k01_0, _sum01); _sum01 = _mm256_fmadd_ps(_r04, _k02_0, _sum01); _sum01 = _mm256_fmadd_ps(_r12, _k10_0, _sum01); _sum01 = _mm256_fmadd_ps(_r13, _k11_0, _sum01); _sum01 = _mm256_fmadd_ps(_r14, _k12_0, _sum01); _sum01 = _mm256_fmadd_ps(_r22, _k20_0, _sum01); _sum01 = _mm256_fmadd_ps(_r23, _k21_0, _sum01); _sum01 = _mm256_fmadd_ps(_r24, _k22_0, _sum01); _sum11 = _mm256_fmadd_ps(_r02, _k00_1, _sum11); _sum11 = _mm256_fmadd_ps(_r03, _k01_1, _sum11); _sum11 = _mm256_fmadd_ps(_r04, _k02_1, _sum11); _sum11 = _mm256_fmadd_ps(_r12, _k10_1, _sum11); _sum11 = _mm256_fmadd_ps(_r13, _k11_1, _sum11); _sum11 = _mm256_fmadd_ps(_r14, _k12_1, _sum11); _sum11 = _mm256_fmadd_ps(_r22, _k20_1, _sum11); _sum11 = _mm256_fmadd_ps(_r23, _k21_1, _sum11); _sum11 = _mm256_fmadd_ps(_r24, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); r0 += 2; r1 += 2; r2 += 2; outptr0 += 16; outptr1 += 16; } for (; j < outw; j++) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 
2); _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; outptr1 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 8; k1 += 9 * 8; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0); _sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0); _sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0); _sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0); _sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0); _sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0); __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_r02, _k00, _sum1); _sum1 = _mm256_fmadd_ps(_r03, _k01, _sum1); _sum1 = _mm256_fmadd_ps(_r04, _k02, _sum1); _sum1 = _mm256_fmadd_ps(_r12, _k10, _sum1); _sum1 = _mm256_fmadd_ps(_r13, _k11, _sum1); _sum1 = _mm256_fmadd_ps(_r14, _k12, _sum1); _sum1 = _mm256_fmadd_ps(_r22, _k20, _sum1); _sum1 = _mm256_fmadd_ps(_r23, _k21, _sum1); _sum1 = _mm256_fmadd_ps(_r24, _k22, _sum1); __m256 _sum2 = _mm256_loadu_ps(outptr0 + 16); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 
4); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_fmadd_ps(_r03, _k00, _sum2); _sum2 = _mm256_fmadd_ps(_r04, _k01, _sum2); _sum2 = _mm256_fmadd_ps(_r05, _k02, _sum2); _sum2 = _mm256_fmadd_ps(_r13, _k10, _sum2); _sum2 = _mm256_fmadd_ps(_r14, _k11, _sum2); _sum2 = _mm256_fmadd_ps(_r15, _k12, _sum2); _sum2 = _mm256_fmadd_ps(_r23, _k20, _sum2); _sum2 = _mm256_fmadd_ps(_r24, _k21, _sum2); _sum2 = _mm256_fmadd_ps(_r25, _k22, _sum2); __m256 _sum3 = _mm256_loadu_ps(outptr0 + 24); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_fmadd_ps(_r04, _k00, _sum3); _sum3 = _mm256_fmadd_ps(_r05, _k01, _sum3); _sum3 = _mm256_fmadd_ps(_r06, _k02, _sum3); _sum3 = _mm256_fmadd_ps(_r14, _k10, _sum3); _sum3 = _mm256_fmadd_ps(_r15, _k11, _sum3); _sum3 = _mm256_fmadd_ps(_r16, _k12, _sum3); _sum3 = _mm256_fmadd_ps(_r24, _k20, _sum3); _sum3 = _mm256_fmadd_ps(_r25, _k21, _sum3); _sum3 = _mm256_fmadd_ps(_r26, _k22, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 4; r1 += 4; r2 += 4; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0); _sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0); _sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0); _sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0); _sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0); _sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0); __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_r02, _k00, _sum1); _sum1 = _mm256_fmadd_ps(_r03, _k01, _sum1); _sum1 = _mm256_fmadd_ps(_r04, _k02, _sum1); _sum1 = _mm256_fmadd_ps(_r12, _k10, _sum1); _sum1 = _mm256_fmadd_ps(_r13, _k11, _sum1); _sum1 = _mm256_fmadd_ps(_r14, _k12, _sum1); _sum1 = _mm256_fmadd_ps(_r22, _k20, _sum1); _sum1 = _mm256_fmadd_ps(_r23, _k21, _sum1); _sum1 = _mm256_fmadd_ps(_r24, _k22, _sum1); _mm256_storeu_ps(outptr0 + 8, _sum1); r0 += 2; r1 += 2; r2 += 2; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0); _sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0); _sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0); _sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0); _sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0); _sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 1; r1 += 1; r2 +=
1; outptr0 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 8; } } } static void conv3x3s2_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); __m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); __m256 _k00_1 = _mm256_loadu_ps(k1); __m256 _k01_1 = _mm256_loadu_ps(k1 + 8); __m256 _k02_1 = _mm256_loadu_ps(k1 + 16); __m256 _k10_1 = _mm256_loadu_ps(k1 + 24); __m256 _k11_1 = _mm256_loadu_ps(k1 + 32); __m256 _k12_1 = _mm256_loadu_ps(k1 + 40); __m256 _k20_1 = _mm256_loadu_ps(k1 + 48); __m256 _k21_1 = _mm256_loadu_ps(k1 + 56); __m256 _k22_1 = _mm256_loadu_ps(k1 + 64); int i = 0; for (; i < outh; i++) { int nn = outw >> 2; int remain = outw & 3; for (; nn > 0; nn--) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r15 = 
_mm256_broadcast_ss(r1 + 4); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); _sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); _sum11 = _mm256_fmadd_ps(_r03, _k00_1, _sum11); _sum11 = _mm256_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = _mm256_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_fmadd_ps(_r25, _k22_1, _sum11); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); _sum02 = _mm256_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_fmadd_ps(_r27, _k22_0, _sum02); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum12 = _mm256_fmadd_ps(_r05, _k00_1, _sum12); _sum12 = _mm256_fmadd_ps(_r06, _k01_1, _sum12); _sum12 = _mm256_fmadd_ps(_r07, _k02_1, _sum12); _sum12 = _mm256_fmadd_ps(_r15, _k10_1, _sum12); _sum12 = _mm256_fmadd_ps(_r16, _k11_1, _sum12); _sum12 = _mm256_fmadd_ps(_r17, _k12_1, _sum12); _sum12 = _mm256_fmadd_ps(_r25, _k20_1, _sum12); _sum12 = _mm256_fmadd_ps(_r26, _k21_1, _sum12); _sum12 = _mm256_fmadd_ps(_r27, _k22_1, _sum12); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); _sum03 = _mm256_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_fmadd_ps(_r29, _k22_0, _sum03); _sum13 = _mm256_fmadd_ps(_r07, _k00_1, _sum13); _sum13 = _mm256_fmadd_ps(_r08, _k01_1, _sum13); _sum13 = _mm256_fmadd_ps(_r09, _k02_1, _sum13); _sum13 = _mm256_fmadd_ps(_r17, _k10_1, _sum13); _sum13 = _mm256_fmadd_ps(_r18, _k11_1, _sum13); _sum13 = _mm256_fmadd_ps(_r19, _k12_1, _sum13); _sum13 = _mm256_fmadd_ps(_r27, _k20_1, _sum13); _sum13 = 
_mm256_fmadd_ps(_r28, _k21_1, _sum13); _sum13 = _mm256_fmadd_ps(_r29, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); r0 += 8; r1 += 8; r2 += 8; outptr0 += 32; outptr1 += 32; } for (; remain > 0; remain--) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; outptr1 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; k1 += 9 * 8; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int nn = outw >> 2; int remain = outw & 3; for (; nn > 0; nn--) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_fmadd_ps(_r01, _k00, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _mm256_storeu_ps(outptr0, _sum00); _sum01 = _mm256_fmadd_ps(_r03, _k00, _sum01); _sum01 = _mm256_fmadd_ps(_r04, _k01, _sum01); _sum01 = _mm256_fmadd_ps(_r05, _k02, _sum01); _sum01 = _mm256_fmadd_ps(_r13, _k10, _sum01); _sum01 = _mm256_fmadd_ps(_r14, _k11, _sum01); _sum01 = _mm256_fmadd_ps(_r15, _k12, _sum01); _sum01 = _mm256_fmadd_ps(_r23, _k20, _sum01); _sum01 = _mm256_fmadd_ps(_r24, _k21, _sum01); _sum01 = _mm256_fmadd_ps(_r25, _k22, _sum01); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _mm256_storeu_ps(outptr0 + 8, _sum01); _sum02 = _mm256_fmadd_ps(_r05, _k00, _sum02); _sum02 = _mm256_fmadd_ps(_r06, _k01, _sum02); _sum02 = _mm256_fmadd_ps(_r07, _k02, _sum02); _sum02 = _mm256_fmadd_ps(_r15, _k10, _sum02); _sum02 = _mm256_fmadd_ps(_r16, _k11, _sum02); _sum02 = _mm256_fmadd_ps(_r17, _k12, _sum02); _sum02 = _mm256_fmadd_ps(_r25, _k20, _sum02); _sum02 = _mm256_fmadd_ps(_r26, _k21, _sum02); _sum02 = _mm256_fmadd_ps(_r27, _k22, _sum02); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); _mm256_storeu_ps(outptr0 + 16, _sum02); _sum03 = _mm256_fmadd_ps(_r07, _k00, _sum03); _sum03 = _mm256_fmadd_ps(_r08, _k01, _sum03); _sum03 
= _mm256_fmadd_ps(_r09, _k02, _sum03); _sum03 = _mm256_fmadd_ps(_r17, _k10, _sum03); _sum03 = _mm256_fmadd_ps(_r18, _k11, _sum03); _sum03 = _mm256_fmadd_ps(_r19, _k12, _sum03); _sum03 = _mm256_fmadd_ps(_r27, _k20, _sum03); _sum03 = _mm256_fmadd_ps(_r28, _k21, _sum03); _sum03 = _mm256_fmadd_ps(_r29, _k22, _sum03); _mm256_storeu_ps(outptr0 + 24, _sum03); r0 += 8; r1 += 8; r2 += 8; outptr0 += 32; } for (; remain > 0; remain--) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_fmadd_ps(_r01, _k00, _sum00); _sum00 = _mm256_fmadd_ps(_r02, _k01, _sum00); _sum00 = _mm256_fmadd_ps(_r03, _k02, _sum00); _sum00 = _mm256_fmadd_ps(_r11, _k10, _sum00); _sum00 = _mm256_fmadd_ps(_r12, _k11, _sum00); _sum00 = _mm256_fmadd_ps(_r13, _k12, _sum00); _sum00 = _mm256_fmadd_ps(_r21, _k20, _sum00); _sum00 = _mm256_fmadd_ps(_r22, _k21, _sum00); _sum00 = _mm256_fmadd_ps(_r23, _k22, _sum00); _mm256_storeu_ps(outptr0, _sum00); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; } } }
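// Reference sketch (not part of ncnn): a scalar version of what
// conv3x3s1_pack1to8_avx computes, handy for validating the intrinsics above.
// The layout assumptions mirror the loads in the kernel: one float per input
// position, the kernel packed as inch x 9 taps x 8 output lanes per packed
// output channel (k0 advances by 9*8 per input channel), and the output
// packed 8 floats per pixel.
static void conv3x3s1_pack1to8_ref(const float* bottom, float* top,
                                   const float* kernel, const float* bias,
                                   int w, int h, int inch,
                                   int outw, int outh, int outch8)
{
    for (int p = 0; p < outch8; p++) // one packed output channel = 8 real channels
    {
        float* outptr = top + p * outh * outw * 8;
        for (int i = 0; i < outh * outw * 8; i++)
            outptr[i] = bias ? bias[p * 8 + i % 8] : 0.f; // out0.fill(_bias0)

        for (int q = 0; q < inch; q++) // accumulate over input channels
        {
            const float* img = bottom + q * w * h;
            const float* k = kernel + (p * inch + q) * 9 * 8; // k0 += 9 * 8 per q
            for (int i = 0; i < outh; i++)
                for (int j = 0; j < outw; j++)
                    for (int ky = 0; ky < 3; ky++)
                        for (int kx = 0; kx < 3; kx++)
                        {
                            float v = img[(i + ky) * w + (j + kx)]; // _mm256_broadcast_ss
                            const float* kk = k + (ky * 3 + kx) * 8;
                            float* o = outptr + (i * outw + j) * 8;
                            for (int c = 0; c < 8; c++)
                                o[c] += v * kk[c]; // one lane of _mm256_fmadd_ps
                        }
        }
    }
}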
test.c
#include <stdio.h> #pragma omp requires unified_shared_memory #define M (1024*1024) #define BUFF_SIZE (1*M) #define N (8*BUFF_SIZE) int b[N]; int Test(int start, int size) { int i; int errors = 0; for(i=0; i<start; i++) b[i] = -1; for(i=start; i<size; i++) b[i] = i; for(i=size; i<N; i++) b[i] = -1; #pragma omp target parallel for for(int i=start; i<size; i++) b[i] += 1; for(i=0; i<start && errors<25; i++) { if (b[i] != -1) printf("%4i: before, got %d, expected %d, %d errors\n", i, b[i], -1, ++errors); } for(i=start; i<size && errors<25; i++) { if (b[i] != i+1) printf("%4i: in, got %d, expected %d, %d errors\n", i, b[i], i+1, ++errors); } for(i=size; i<N && errors<25; i++) { if (b[i] != -1) printf("%4i: after, got %d, expected %d, %d errors\n", i, b[i], -1, ++errors); } if (errors==0) { printf("success with start %d, size %d (%d mod buff size)\n\n", start, size, size % BUFF_SIZE); } else { printf("%d errors with start %d, size %d (%d mod buff size)\n\n", errors, start, size, size % BUFF_SIZE); } return (errors>0); } int main() { int offset[] = {0, 1, 2, BUFF_SIZE/2, BUFF_SIZE-2, BUFF_SIZE-1}; int onum = 6; int errors = 0; for(int s1=0; s1<onum; s1++) { for(int s2=0; s2<onum; s2++) { errors += Test(offset[s1], N-offset[s2]); if (errors>20) { printf("abort due to errors\n"); return errors; } } } printf("finished with %d errors\n", errors); return errors; }
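/* Companion sketch (hypothetical, not part of the test above): without
   "#pragma omp requires unified_shared_memory", the same offload needs an
   explicit map clause so the accessed slice of the array is copied to and
   from the device. */
#include <stdio.h>
#define N2 1024
int b2[N2];
int main(void) {
  for (int i = 0; i < N2; i++) b2[i] = i;
  /* map(tofrom:...) copies b2 to the device before the region and back after */
  #pragma omp target parallel for map(tofrom: b2[0:N2])
  for (int i = 0; i < N2; i++) b2[i] += 1;
  printf("b2[10] = %d (expected 11)\n", b2[10]);
  return 0;
}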
serial_measurement.c
/* Blocked matrix multiplication : Serial Implementation Author : Omkar Damle. Date : August 2016. */ #include<stdio.h> #include<math.h> #include<omp.h> #include<time.h> #include<string.h> #include<stdlib.h> // Using the MONOTONIC clock #define CLK CLOCK_MONOTONIC /* Function to compute the difference between two points in time */ struct timespec diff(struct timespec start, struct timespec end); /* Function that computes the difference between two time instances Taken from - http://www.guyrutenberg.com/2007/09/22/profiling-code-using-clock_gettime/ Further reading: http://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance http://stackoverflow.com/questions/3523442/difference-between-clock-realtime-and-clock-monotonic */ struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if((end.tv_nsec-start.tv_nsec)<0){ temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else{ temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } int main(int argc, char* argv[]) { struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg; /* Should start before anything else */ clock_gettime(CLK, &start_e2e); /* Check if enough command-line arguments are taken in. */ if(argc < 3){ printf( "Usage: %s n p \n", argv[0] ); return -1; } int n=atoi(argv[1]); /* matrix dimension */ int p=atoi(argv[2]); /* number of processors */ char *problem_name = "matrix_multiplication"; char *approach_name = "serial"; // char buffer[10]; // FILE* inputFile; FILE* outputFile; // inputFile = fopen(argv[3],"r"); char outputFileName[50]; sprintf(outputFileName,"output/%s_%s_%s_%s_output.txt",problem_name,approach_name,argv[1],argv[2]); int *a[n],*b[n],*c[n]; //counters for loops int i,j,k,l,m; //putting values in the matrices for(i = 0;i < n;i++){ a[i] = (int *) malloc(n * sizeof(int)); b[i] = (int *) malloc(n * sizeof(int)); c[i] = (int *) malloc(n * sizeof(int)); for(j = 0; j < n; j++){ a[i][j] = 1; b[i][j] = 1; c[i][j] = 0; } } //Setting parameters for parallelizing the code clock_gettime(CLK, &start_alg); /* Start the algo timer (after allocation, so only the core algorithm is timed) */ /*----------------------Core algorithm starts here----------------------------------------------*/ int block_size = 2; /* assumes n is a multiple of block_size */ omp_set_num_threads(p); //Matrix multiplication for (i = 0; i < n; i += block_size) { for (j = 0; j < n; j += block_size) { // #pragma omp parallel for collapse(2) for (k = 0; k < block_size; ++k) { for (l = 0; l < block_size; ++l) { for (m = 0; m < n; ++m) { // #pragma omp critical c[i + k][j + l] += a[i + k][m] * b[m][j + l]; } } } } } /*----------------------Core algorithm finished--------------------------------------------------*/ clock_gettime(CLK, &end_alg); /* End the algo timer */ /* Ensure that only the algorithm is present between these two timers. Further, the whole algorithm should be present. */ /* Should end before anything else (printing comes later) */ clock_gettime(CLK, &end_e2e); e2e = diff(start_e2e, end_e2e); alg = diff(start_alg, end_alg); // /*-----------REMOVE THIS SEGMENT. ONLY FOR DEBUGGING----------------*/ // for(i=0;i<n;i++){ // for(j=0;j<n;j++) // printf("%d ", c[i][j]); // printf("\n"); // } // /*-------------------------------------------------------------------*/ outputFile = fopen(outputFileName,"w"); // fprintf(outputFile,"%.8f\n",pi); /* problem_name,approach_name,n,p,e2e_sec,e2e_nsec,alg_sec,alg_nsec Change problem_name to whatever problem you've been assigned Change approach_name to whatever approach has been assigned p should be 0 for serial codes!! */ printf("%s,%s,%d,%d,%ld,%ld,%ld,%ld\n", problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); fclose(outputFile); return 0; }
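/* Sketch (not part of the assignment code above): the commented-out pragmas
   hint at parallelization. Because each (i,j) block of c is written by
   exactly one pair of outer-loop indices, the two block loops can be
   collapsed and distributed across threads with no critical section.
   Assumes, like the serial loop, that n is a multiple of block_size. */
void blocked_matmul_omp(int n, int block_size, int **a, int **b, int **c)
{
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < n; i += block_size)
        for (int j = 0; j < n; j += block_size)
            for (int k = 0; k < block_size; ++k)
                for (int l = 0; l < block_size; ++l)
                    for (int m = 0; m < n; ++m)
                        c[i + k][j + l] += a[i + k][m] * b[m][j + l]; /* disjoint tiles: race-free */
}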
ThreadedFriends.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.6 -------------------------------------------------*/ /* date: 6/15/2017 ---------------------------------------------*/ /* authors: Ariful Azad, Aydin Buluc --------------------------*/ /****************************************************************/ /* Copyright (c) 2010-2017, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _THREADED_FRIENDS_H_ #define _THREADED_FRIENDS_H_ #include <iostream> #include "SpMat.h" // Best to include the base class first #include "SpHelper.h" #include "StackEntry.h" #include "Isect.h" #include "Deleter.h" #include "SpImpl.h" #include "SpParHelper.h" #include "Compare.h" #include "CombBLAS.h" #include "PreAllocatedSPA.h" namespace combblas { template <class IU, class NU> class SpTuples; template <class IU, class NU> class SpDCCols; template <class IU, class NU> class Dcsc; // multithreaded HeapSpGEMM template <typename SR, typename NTO, typename IT, typename NT1, typename NT2> SpTuples<IT, NTO> * LocalSpGEMM (const SpDCCols<IT, NT1> & A, const SpDCCols<IT, NT2> & B, bool clearA, bool clearB) { IT mdim = A.getnrow(); IT ndim = B.getncol(); IT nnzA = A.getnnz(); if(A.isZero() || B.isZero()) { return new SpTuples<IT, NTO>(0, mdim, ndim); } Dcsc<IT,NT1>* Adcsc = A.GetDCSC(); Dcsc<IT,NT2>* Bdcsc = B.GetDCSC(); IT nA = A.getncol(); IT cnzmax = Adcsc->nz + Bdcsc->nz; // estimate on the size of resulting matrix C float cf = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc); IT csize = static_cast<IT>(ceil(cf)); // chunk size IT * aux; Adcsc->ConstructAux(nA, aux); int numThreads = 1; // default case #ifdef THREADED #pragma omp parallel { numThreads = omp_get_num_threads(); } #endif IT* colnnzC = estimateNNZ(A, B); IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads); delete [] colnnzC; IT nnzc = colptrC[Bdcsc->nzc]; std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc]))); // thread private space for heap and colinds std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads); std::vector<std::vector<HeapEntry<IT,NT1>>> globalheapVec(numThreads); for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs { colindsVec[i].resize(nnzA/numThreads); globalheapVec[i].resize(nnzA/numThreads); } #pragma omp parallel for for(int i=0; i < Bdcsc->nzc; ++i) { IT
nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B int myThread = omp_get_thread_num(); if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed { colindsVec[myThread].resize(nnzcolB); globalheapVec[myThread].resize(nnzcolB); } // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above), // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cpnack) Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize); std::pair<IT,IT> * colinds = colindsVec[myThread].data(); HeapEntry<IT,NT1> * wset = globalheapVec[myThread].data(); IT hsize = 0; for(IT j = 0; (unsigned)j < nnzcolB; ++j) // create the initial heap { if(colinds[j].first != colinds[j].second) // current != end { wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]); } } std:make_heap(wset, wset+hsize); IT curptr = colptrC[i]; while(hsize > 0) { std::pop_heap(wset, wset + hsize); // result is stored in wset[hsize-1] IT locb = wset[hsize-1].runr; // relative location of the nonzero in B's current column NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]); if (!SR::returnedSAID()) { if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key) { std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs); } else { tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ; } } if( (++(colinds[locb].first)) != colinds[locb].second) // current != end { // runr stays the same ! wset[hsize-1].key = Adcsc->ir[colinds[locb].first]; wset[hsize-1].num = Adcsc->numx[colinds[locb].first]; std::push_heap(wset, wset+hsize); } else { --hsize; } } } if(clearA) delete const_cast<SpDCCols<IT, NT1> *>(&A); if(clearB) delete const_cast<SpDCCols<IT, NT2> *>(&B); delete [] colptrC; delete [] aux; SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true); return spTuplesC; } } #endif
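// ---------------------------------------------------------------------------
// Illustrative usage (not part of the original header): a minimal sketch of
// how the heap-based LocalSpGEMM above might be driven. It assumes CombBLAS's
// PlusTimesSRing semiring (pulled in via CombBLAS.h) and double-valued
// SpDCCols inputs; the driver name spgemm_sketch is hypothetical. Any type
// providing static multiply(), add(), and returnedSAID() members could serve
// as the semiring SR, since those are the only operations the kernel calls.
// ---------------------------------------------------------------------------
#include "CombBLAS.h"

combblas::SpTuples<int64_t, double> *
spgemm_sketch(const combblas::SpDCCols<int64_t, double> &A,
              const combblas::SpDCCols<int64_t, double> &B)
{
    // Ordinary (+, *) arithmetic over doubles.
    typedef combblas::PlusTimesSRing<double, double> PTDD;

    // clearA/clearB = false keeps both inputs alive; the caller owns the
    // returned (row, column, value) triples and must delete them.
    return combblas::LocalSpGEMM<PTDD, double>(A, B, false, false);
}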
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class 
DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. 
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token; all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients should make sure all calls to get() with the same
  /// location happen while the function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// A key method to reduce duplicate debug info from Sema.
  virtual void anchor();

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;

    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
    if (New->isExternallyDeclarable()) {
      assert(Old->isExternallyDeclarable() &&
             "should not have found a non-externally-declarable previous decl");
      return true;
    }

    return false;
  }
  bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);

  void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
                                      QualType ResultTy,
                                      ArrayRef<QualType> Args);

public:
  /// The maximum alignment, same as in llvm::Value. We duplicate them here
  /// because that allows us not to duplicate the constants in clang code,
  /// which we must do since we can't directly use the llvm constants.
  /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
  ///
  /// This is the greatest alignment value supported by load, store, and alloca
  /// instructions, and global values.
  static const unsigned MaxAlignmentExponent = 29;
  static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;

  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;
  typedef OpaquePtr<QualType> TypeTy;

  OpenCLOptions OpenCLFeatures;
  FPOptions FPFeatures;

  const LangOptions &LangOpts;
  Preprocessor &PP;
  ASTContext &Context;
  ASTConsumer &Consumer;
  DiagnosticsEngine &Diags;
  SourceManager &SourceMgr;
  api_notes::APINotesManager APINotes;

  /// Flag indicating whether or not to collect detailed statistics.
  bool CollectStats;

  /// Code-completion consumer.
  CodeCompleteConsumer *CodeCompleter;

  /// CurContext - This is the current declaration context of parsing.
  DeclContext *CurContext;

  /// Generally null except when we temporarily switch decl contexts,
  /// like in \see ActOnObjCTemporaryExitContainerContext.
  DeclContext *OriginalLexicalContext;

  /// VAListTagName - The declaration name corresponding to __va_list_tag.
  /// This is used as part of a hack to omit that class from ADL results.
  DeclarationName VAListTagName;

  bool MSStructPragmaOn; // True when \#pragma ms_struct on

  /// Controls member pointer representation format under the MS ABI.
  LangOptions::PragmaMSPointersToMembersKind
      MSPointerToMemberRepresentationMethod;

  /// Stack of active SEH __finally scopes. Can be empty.
  SmallVector<Scope*, 2> CurrentSEHFinally;

  /// Source location for newly created implicit MSInheritanceAttrs
  SourceLocation ImplicitMSInheritanceAttrLoc;

  /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
  /// `TransformTypos` in order to keep track of any TypoExprs that are created
  /// recursively during typo correction and wipe them away if the correction
  /// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. 
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  // RAII object to push / pop sentinel slots for all MS #pragma stacks.
  // Actions should be performed only if we enter / exit a C++ method body.
  class PragmaStackSentinelRAII {
  public:
    PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
    ~PragmaStackSentinelRAII();

  private:
    Sema &S;
    StringRef SlotLabel;
    bool ShouldAct;
  };

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// This is an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A push'd group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a reference
  /// to a variable (constant) that may or may not be odr-used in this Expr, and
  /// we won't know until all lvalue-to-rvalue and discarded value conversions
  /// have been applied to all subexpressions of the enclosing full expression.
  /// This is cleared at the end of each full expression.
  using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
  MaybeODRUseExprSet MaybeODRUseExprs;

  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  /// Stack containing information needed when in C++2a an 'auto' is encountered
  /// in a function declaration parameter type specifier in order to invent a
  /// corresponding template parameter in the enclosing abbreviated function
  /// template. This information is also present in LambdaScopeInfo, stored in
  /// the FunctionScopes stack.
  SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedOverridingExceptionSpecChecks;

  /// All the function redeclarations seen during a class definition that had
  /// their exception spec checks delayed, plus the prior declaration they
  /// should be checked against. Except during error recovery, the new decl
  /// should always be a friend declaration, as that's the only valid way to
  /// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. 
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library reside.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>.
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields. This is really a
  bool AllowAbstractFieldReference;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// Whether we are in a decltype expression.
    bool IsDecltype;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// Set of candidates for starting an immediate invocation.
    llvm::SmallVector<ImmediateInvocationCandidate, 4>
        ImmediateInvocationCandidates;

    /// Set of DeclRefExprs referencing a consteval function when used in a
    /// context not already known to be immediately invoked.
    llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
/// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FPFeatures state on entry/exit of compound
  /// statements.
  class FPFeaturesStateRAII {
  public:
    FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie.
// DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  /// Invent a new identifier for parameters of abbreviated templates.
  IdentifierInfo *
  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                             unsigned Index);

  void emitAndClearUnusedLocalTypedefWarnings();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. 
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
  { assert(DiagID != 0 && "no diagnostic for type diagnoser"); }

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, std::index_sequence_for<Ts...>());
      DB << T;
    }
  };

  /// Do a check to make sure \p Name looks like a legal swift_name
  /// attribute for the decl \p D. Raise a diagnostic if the name is invalid
  /// for the given declaration.
  ///
  /// For a function, this will validate a compound Swift name,
  /// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
  /// and the function will output the number of parameter names, and whether
  /// this is a single-arg initializer.
  ///
  /// For a type, enum constant, property, or variable declaration, this will
  /// validate either a simple identifier, or a qualified
  /// <code>context.identifier</code> name.
  ///
  /// \returns true if the name is a valid swift name for \p D, false otherwise.
  bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc,
                         const IdentifierInfo *AttrName);

private:
  /// Methods for marking which expressions involve dereferencing a pointer
  /// marked with the 'noderef' attribute. Expressions are checked bottom up as
  /// they are parsed, meaning that a noderef pointer may not be accessed. For
  /// example, in `&*p` where `p` is a noderef pointer, we will first parse the
  /// `*p`, but need to check that `address of` is called on it. This requires
  /// keeping a container of all pending expressions and checking whether the
  /// address of each of them is eventually taken.
  void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
  void CheckAddressOfNoDeref(const Expr *E);
  void CheckMemberAccessOfNoDeref(const MemberExpr *E);

  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser *Diagnoser);

  struct ModuleScope {
    SourceLocation BeginLoc;
    clang::Module *Module = nullptr;
    bool ModuleInterface = false;
    bool ImplicitGlobalModuleFragment = false;
    VisibleModuleSet OuterVisibleModules;
  };
  /// The modules we're currently parsing.
  llvm::SmallVector<ModuleScope, 16> ModuleScopes;

  /// Namespace definitions that we will export when they finish.
  llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

  /// Get the module whose scope we are currently within.
  Module *getCurrentModule() const {
    return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
  }

  VisibleModuleSet VisibleModules;

public:
  /// Get the module owning an entity.
  Module *getOwningModule(const Decl *Entity) {
    return Entity->getOwningModule();
  }

  /// Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND);

  bool isModuleVisible(const Module *M, bool ModulePrivate = false);

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }

  /// Determine whether any declaration of an entity is visible.
  bool hasVisibleDeclaration(const NamedDecl *D,
                             llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
    return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
  }
  bool hasVisibleDeclarationSlow(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules);

  bool hasVisibleMergedDefinition(NamedDecl *Def);
  bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

  /// Determine if \p D and \p Suggested have a structurally compatible
  /// layout as described in C11 6.2.7/1.
  bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

  /// Determine if \p D has a visible definition.
If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
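  ///
  /// A sketch of the call shape from the parser side (the variable names are
  /// illustrative only; 'Actions' is the parser's Sema reference):
  /// \code
  ///   Sema::NameClassification C =
  ///       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
  ///                            /*CCC=*/nullptr);
  ///   if (C.getKind() == Sema::NC_Type)
  ///     ParsedType T = C.getType();
  /// \endcode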
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
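  ///
  /// Sketch of the deferred case: a by-copy lambda capture shadows a local,
  /// and the warning is only of interest once the copy is modified:
  /// \code
  ///   int total = 0;
  ///   auto add = [total](int x) mutable {
  ///     total += x; // modifies the shadowing copy, not the enclosing 'total'
  ///   };
  /// \endcode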
  void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
  void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
  /// Map of current shadowing declarations to shadowed declarations. Warn if
  /// it looks like the user is trying to modify the shadowing declaration.
  llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                  TypedefNameDecl *D,
                                  LookupResult &Previous, bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope,
                                     ArrayRef<BindingDecl *> Bindings = None);
  NamedDecl *
  ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                               MultiTemplateParamsArg TemplateParamLists);
  // Returns true if the variable declaration is a redeclaration
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                     Expr *Init);
  void CheckCompleteVariableDeclaration(VarDecl *VD);
  void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  enum class CheckConstexprKind {
    /// Diagnose issues that are non-constant or that are extensions.
    Diagnose,
    /// Identify whether this function satisfies the formal rules for constexpr
    /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
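  ///
  /// Sketch (C compiled with ARC; the __strong 'id' member makes the union
  /// non-trivial to default-initialize, copy, and destruct):
  /// \code
  ///   union U { __strong id obj; int i; };
  ///   void take(union U u);   // NTCUC_FunctionParam context
  ///   union U give(void);     // NTCUC_FunctionReturn context
  /// \endcode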
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
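  ///
  /// Illustrative trigger, assuming a threshold configured via a warning such
  /// as -Wlarge-by-value-copy=N:
  /// \code
  ///   struct Big { char buf[4096]; };
  ///   Big produce();        // by-value return larger than the threshold
  ///   void consume(Big b);  // by-value parameter larger than the threshold
  /// \endcode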
  void
  DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                         QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr,
                              SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                              SourceLocation SemiLoc);

  enum class ModuleDeclKind {
    Interface,      ///< 'export module X;'
    Implementation, ///< 'module X;'
  };

  /// The parser has processed a module-declaration that begins the definition
  /// of a module interface or implementation.
  DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                                 SourceLocation ModuleLoc, ModuleDeclKind MDK,
                                 ModuleIdPath Path, bool IsFirstDecl);

  /// The parser has processed a global-module-fragment declaration that begins
  /// the definition of the global module fragment of the current module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

  /// The parser has processed a private-module-fragment declaration that
  /// begins the definition of the private module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  /// \param PrivateLoc The location of the 'private' keyword.
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);

  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  ///        could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
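  ///
  /// Illustrative scenario (module and type names are hypothetical):
  /// \code
  ///   // 'Widget' is declared in module 'Lib', which has not been imported:
  ///   Widget w; // diagnosed here, with a suggestion to import 'Lib'
  /// \endcode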
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
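  ///
  /// For instance, the following yields NTK_Typedef for the diagnostic:
  /// \code
  ///   typedef int Foo;
  ///   struct Foo *p; // err_tag_reference_non_tag: 'Foo' is a typedef
  /// \endcode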
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
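  ///
  /// Both flavours in one sketch:
  /// \code
  ///   struct S {
  ///     S(const S &) = default;                      // a CXXSpecialMember
  ///     bool operator==(const S &) const = default;  // a DefaultedComparisonKind
  ///   };
  /// \endcode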
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,

    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,

    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,

    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,

    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,

    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Return true if a new attribute was added.
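  ///
  /// E.g., a redeclaration picking up a merged availability attribute
  /// (AMK_Redeclaration; sketch only):
  /// \code
  ///   void f() __attribute__((availability(macos, introduced=10.12)));
  ///   void f(); // mergeAvailabilityAttr attaches the attribute here too
  /// \endcode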
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. 
Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
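  ///
  /// A sketch of the extended case next to a diagnosed one:
  /// \code
  ///   const int &ok = 42; // temporary's lifetime extended to that of 'ok'
  ///   struct S {
  ///     const int &r;
  ///     S() : r(42) {}    // warned: the temporary dies when the ctor returns
  ///   };
  /// \endcode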
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
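  ///
  /// For example, a switch condition is converted contextually (sketch):
  /// \code
  ///   struct Size { operator unsigned() const; };
  ///   void f(Size s) {
  ///     switch (s) { // contextual implicit conversion to an integral type
  ///     default: break;
  ///     }
  ///   }
  /// \endcode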
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = true,
                            bool AllowExplicitConversion = false,
                            ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                            ConversionSequenceList EarlyConversions = None,
                            OverloadCandidateParamOrder PO = {});
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet,
                             TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                             bool SuppressUserConversions = false,
                             bool PartialOverloading = false,
                             bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversion = false,
                          OverloadCandidateParamOrder PO = {});
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false,
                                  OverloadCandidateParamOrder PO = {});
  void AddTemplateOverloadCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
      bool PartialOverloading = false, bool AllowExplicit = true,
      ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
      OverloadCandidateParamOrder PO = {});
  bool CheckNonDependentConversions(
      FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
      ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
      ConversionSequenceList &Conversions, bool SuppressUserConversions,
      CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
      Expr::Classification ObjectClassification = {},
      OverloadCandidateParamOrder PO = {});
  void AddConversionCandidate(
      CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
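///
/// A usage sketch (editor's addition), assuming `S` is the Sema instance and
/// `FD` the function whose address is being taken:
/// \code
///   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
///     return ExprError(); // a diagnostic has already been emitted
/// \endcode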
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks 
that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. 
This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. 
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc,
                            LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl
                                   = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing);
bool isKnownName(StringRef name);

/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
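///
/// A usage sketch (editor's addition): callers typically consult
/// getEmissionStatus() before deciding whether a device/host diagnostic
/// should be emitted immediately or deferred, e.g.
/// \code
///   if (S.getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted)
///     emitDiagnosticNow(); // hypothetical caller-side helper
/// \endcode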
enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                           NullabilityKind nullability,
                                           SourceLocation diagLoc,
                                           bool allowArrayTypes,
                                           bool overrideExisting);

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
                           ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
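///
/// A usage sketch (editor's addition):
/// \code
///   if (S.IvarBacksCurrentMethodAccessor(IFace, Method, IV))
///     return; // uses of IV inside its own accessor are expected; don't warn
/// \endcode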
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method,
                                    ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                  const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                   SourceLocation AtLoc,
                   SourceLocation LParenLoc,
                   FieldDeclarator &FD,
                   Selector GetterSel,
                   SourceLocation GetterNameLoc,
                   Selector SetterSel,
                   SourceLocation SetterNameLoc,
                   const bool isReadWrite,
                   unsigned &Attributes,
                   const unsigned AttributesAsWritten,
                   QualType T,
                   TypeSourceInfo *TSI,
                   tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
                                        const ObjCImplementationDecl *ImplD,
                                        const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns the default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
                           bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in the global method pool for
/// the given selector. It checks the desired kind first; if none is found
/// and the CheckTheOther parameter is set, it then checks the other kind.
/// If no such method or only one method is found, the function returns
/// false; otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                        bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
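///
/// A usage sketch (editor's addition):
/// \code
///   SmallVector<ObjCIvarDecl *, 8> Ivars;
///   S.CollectIvarsToConstructOrDestruct(OI, Ivars);
///   for (ObjCIvarDecl *IV : Ivars)
///     emitDefaultInitialization(IV); // hypothetical caller-side helper
/// \endcode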
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
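///
/// A usage sketch (editor's addition): guarantee the pop on every early
/// return, and call disable() on the path that hands the scope off:
/// \code
///   FunctionScopeRAII FuncScope(SemaRef);
///   if (HadError)
///     return StmtError();  // scope popped here by ~FunctionScopeRAII
///   FuncScope.disable();   // success path: leave the scope pushed
/// \endcode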
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
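///
/// A usage sketch (editor's addition), as it might be invoked from
/// implicit-cast creation:
/// \code
///   if (Kind == CK_NullToPointer || Kind == CK_NullToMemberPointer)
///     S.diagnoseZeroToNullptrConversion(Kind, E);
/// \endcode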
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
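//
// A minimal sketch (editor's addition) of the common case, assuming `S` is
// the Sema instance and `Func` the callee just selected by overload
// resolution:
//
//   S.MarkFunctionReferenced(Loc, Func); // MightBeOdrUse defaults to true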
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
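///
/// A usage sketch (editor's addition), assuming `PD` is a PartialDiagnostic
/// prepared by the caller:
/// \code
///   ExprResult Recovered = BaseExpr;
///   if (S.tryToRecoverWithCall(Recovered, PD))
///     return Recovered; // may be invalid if an error was emitted
/// \endcode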
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
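///
/// A usage sketch (editor's addition), as when classifying a reference to a
/// variable while building a DeclRefExpr:
/// \code
///   NonOdrUseReason NOUR = S.getNonOdrUseReasonInCurrentContext(VD);
///   // NOUR_None means the reference is potentially an odr-use.
/// \endcode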
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
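///
/// A usage sketch (editor's addition), assuming `StringToks` holds the
/// adjacent string-literal tokens collected by the parser:
/// \code
///   ExprResult Lit = S.ActOnStringLiteral(StringToks, /*UDLScope=*/nullptr);
/// \endcode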
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
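  // For example (an illustrative sketch; the names are hypothetical):
  //   struct Widget { void draw(); };
  //   struct Handle { Widget *operator->(); };
  //   Handle h;
  //   h.draw(); // no member 'draw' in Handle; recovery retries h->draw()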
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  enum class AtomicArgumentOrder { API, AST };
  ExprResult
  BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                  SourceLocation RParenLoc, MultiExprArg Args,
                  AtomicExpr::AtomicOp Op,
                  AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
  ExprResult
  BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                        ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                        Expr *Config = nullptr, bool IsExecConfig = false,
                        ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc, Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                  SourceLocation RParenLoc, Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax, ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
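  // For example, in the GNU statement expression below the value of the
  // final expression statement becomes the value of the whole expression
  // ('compute' is a placeholder):
  //   int y = ({ int t = compute(); t + 1; }); // y is t + 1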
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
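  /// For example, __builtin_convertvector converts element-wise between
  /// vector types with the same number of elements (a sketch using the
  /// ext_vector_type attribute):
  /// \code
  ///   typedef float float4 __attribute__((ext_vector_type(4)));
  ///   typedef int   int4   __attribute__((ext_vector_type(4)));
  ///   float4 F = {1.5f, 2.5f, 3.5f, 4.5f};
  ///   int4   I = __builtin_convertvector(F, int4); // {1, 2, 3, 4}
  /// \endcode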
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing whether we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  enum class ComparisonCategoryUsage {
    /// The '<=>' operator was used in an expression and a builtin operator
    /// was selected.
    OperatorInExpression,
    /// A defaulted 'operator<=>' needed the comparison category. This
    /// typically only applies to 'std::strong_ordering', due to the implicit
    /// fallback return value.
    DefaultedOperator,
  };

  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc,
                                       ComparisonCategoryUsage Usage);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
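  /// For example:
  /// \code
  ///   #include <initializer_list>
  ///   struct Vec {
  ///     Vec(std::initializer_list<int>); // initializer-list constructor
  ///     Vec(int, int);                   // not an initializer-list constructor
  ///   };
  /// \endcode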
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
      CXXConstructorDecl *Constructor, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
  // if the constructor can be elidable?
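  // (For example, in 'X x = X(args);' the temporary may be elided and 'x'
  // constructed directly; the 'Elidable' flag on the overload below records
  // whether such elision is permitted.)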
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
      CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then comes throw(collected exceptions).
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E) { CalledStmt(E); }

    /// Integrate an invoked statement into the collected data.
    void CalledStmt(Stmt *S);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any".
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr =
            Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
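  /// For example (a sketch), the computed specification is noexcept only
  /// when every subobject's default constructor is non-throwing:
  /// \code
  ///   struct A { A() = default; };          // noexcept
  ///   struct B { B() noexcept(false); };
  ///   struct C : A { B b; };                // C() is noexcept(false), via B
  /// \endcode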
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                     CXXConstructorDecl *CD);

  /// Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);

  /// Check the given noexcept-specifier, convert its expression, and compute
  /// the appropriate ExceptionSpecificationType.
  ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                               ExceptionSpecificationType &EST);

  /// Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(bool IsTopLevel,
                                   ExceptionSpecificationType EST,
                                   ArrayRef<ParsedType> DynamicExceptions,
                                   ArrayRef<SourceRange> DynamicExceptionRanges,
                                   Expr *NoexceptExpr,
                                   SmallVectorImpl<QualType> &Exceptions,
                                   FunctionProtoType::ExceptionSpecInfo &ESI);

  /// Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(
      Decl *Method, ExceptionSpecificationType EST,
      SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions,
      ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// Produce notes explaining why a defaulted function was defined as deleted.
  void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);

  /// Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. 
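  /// For example, for
  /// \code
  ///   struct S { int v; };
  /// \endcode
  /// the implicit special members are S(), S(const S&), S(S&&),
  /// operator=(const S&), operator=(S&&), and ~S().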
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
  /// Otherwise it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  /// Wrap the expression in a ConstantExpr if it is a potential immediate
  /// invocation.
  ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr,
                               SourceLocation Loc,
                               SmallVectorImpl<Expr *> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                                Scope *S, CXXScopeSpec &SS,
                                bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc,
                               IdentifierInfo &II, SourceLocation NameLoc,
                               Scope *S, CXXScopeSpec &SS,
                               ParsedType ObjectType, bool EnteringContext);

  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc,
                               Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc,
                               Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               TypeSourceInfo *Ty,
                               Expr *E,
                               SourceRange AngleBrackets,
                               SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);
  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                     Expr *Operand, SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                            SourceLocation LParenLoc, bool isType,
                            void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
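  /// For example (a Microsoft extension; 'IWidget' and its GUID are
  /// placeholders):
  /// \code
  ///   struct __declspec(uuid("12345678-1234-1234-1234-1234567890ab")) IWidget;
  ///   const GUID &G = __uuidof(IWidget);
  /// \endcode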
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                            SourceLocation LParenLoc, bool isType,
                            void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// Handle a C++1z fold-expression: ( expr op ... op expr ).
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc,
                              Optional<unsigned> NumExpansions);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  /// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// Build a CXXThisExpr and mark it referenced in the current context.
  Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
  void MarkThisReferenced(CXXThisExpr *This);

  /// Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
  QualType getCurrentThisType();

  /// When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class), along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };

  /// Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// 'this' that may or may not be used in certain specializations of
  /// a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  ///
  /// \return true on failure, false on success.
  bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
      bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr,
      bool ByCopy = false);

  /// Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  ExprResult ActOnObjCAvailabilityCheckExpr(
      llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
      SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
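  /// For example (a sketch), a class-scope allocation function is found in
  /// preference to the global one when both scopes are searched:
  /// \code
  ///   #include <cstddef>
  ///   struct Pooled {
  ///     static void *operator new(std::size_t);
  ///     static void operator delete(void *);
  ///   };
  ///   Pooled *P = new Pooled; // uses Pooled::operator new, not ::operator new
  /// \endcode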
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
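  /// For example ('use' is a placeholder), the call below is a
  /// full-expression requiring a cleanup to destroy the std::string
  /// temporary, so it is wrapped in an ExprWithCleanups:
  /// \code
  ///   #include <string>
  ///   void use(const std::string &);
  ///   void g() { use(std::string("tmp")); } // temporary destroyed after call
  /// \endcode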
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
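  /// For example ('computeValue' is a placeholder):
  /// \code
  ///   auto L = [n = computeValue()] { return n + 1; };
  /// \endcode
  /// Here 'n' is not a declaration of the enclosing scope; the dummy VarDecl
  /// is what name lookup inside the lambda body finds for 'n'.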
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                    CXXConversionDecl *Conv);

  ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                           SourceLocation ConvLocation,
                                           CXXConversionDecl *Conv,
                                           Expr *Src);

  /// Check whether the given expression is a valid constraint expression.
  /// A diagnostic is emitted if it is not, false is returned, and
  /// PossibleNonPrimary will be set to true if the failure might be due to a
  /// non-primary expression being used as an atomic constraint.
  bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
                                 bool *PossibleNonPrimary = nullptr,
                                 bool IsTrailingRequiresClause = false);

  /// Check whether the given type-dependent expression will be the name of a
  /// function or another callable function-like entity (e.g. a function
  /// template or overload set) for any substitution.
  bool IsDependentFunctionNameExpr(Expr *E);

private:
  /// Caches pairs of template-like decls whose associated constraints were
  /// checked for subsumption and whether or not the first's constraints did in
  /// fact subsume the second's.
  llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
  /// Caches the normalized associated constraints of declarations (concepts or
  /// constrained declarations). If an error occurred while normalizing the
  /// associated constraints of the template or concept, nullptr will be cached
  /// here.
  llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

  llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
      SatisfactionCache;

public:
  const NormalizedConstraint *getNormalizedAssociatedConstraints(
      NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

  /// \brief Check whether the given declaration's associated constraints are
  /// at least as constrained as another declaration's according to the
  /// partial ordering of constraints.
  ///
  /// \param Result If no error occurred, receives the result: true if D1 is
  /// at least as constrained as D2, and false otherwise.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                              NamedDecl *D2, ArrayRef<const Expr *> AC2,
                              bool &Result);

  /// Emit a diagnostic if D1 was not at least as constrained as D2, but
  /// would have been if a pair of atomic constraints involved had been
  /// declared in a concept rather than repeated in two separate places in
  /// code.
  /// \returns true if such a diagnostic was emitted, false otherwise.
  bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
      NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2,
      ArrayRef<const Expr *> AC2);

  /// \brief Check whether the given list of constraint expressions are
  /// satisfied (as if in a 'conjunction') given template arguments.
  /// \param Template the template-like entity that triggered the constraints
  /// check (either a concept or a constrained entity).
  /// \param ConstraintExprs a list of constraint expressions, treated as if
  /// they were 'AND'ed together.
  /// \param TemplateArgs the list of template arguments to substitute into the
  /// constraint expression.
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  /// \brief Check whether the given list of constraint expressions are
  /// satisfied (as if in a 'conjunction') given template arguments.
  /// \param Template the template-like entity that triggered the constraints
  /// check (either a concept or a constrained entity).
  /// \param ConstraintExprs a list of constraint expressions, treated as if
  /// they were 'AND'ed together.
  /// \param TemplateArgs the list of template arguments to substitute into
  /// the constraint expression.
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  /// \param Satisfaction if true is returned, will contain details of the
  /// satisfaction, with enough information to diagnose an unsatisfied
  /// expression.
  /// \returns true if an error occurred and satisfaction could not be
  /// checked, false otherwise.
  bool CheckConstraintSatisfaction(
      const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
      ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange,
      ConstraintSatisfaction &Satisfaction);

  /// \brief Check whether the given non-dependent constraint expression is
  /// satisfied. Returns false and updates Satisfaction with the satisfaction
  /// verdict if successful, emits a diagnostic and returns true if an error
  /// occurred and satisfaction could not be determined.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                   ConstraintSatisfaction &Satisfaction);

  /// Check whether the given function decl's trailing requires clause is
  /// satisfied, if any. Returns false and updates Satisfaction with the
  /// satisfaction verdict if successful, emits a diagnostic and returns true
  /// if an error occurred and satisfaction could not be determined.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckFunctionConstraints(const FunctionDecl *FD,
                                ConstraintSatisfaction &Satisfaction,
                                SourceLocation UsageLoc = SourceLocation());

  /// \brief Ensure that the given template arguments satisfy the constraints
  /// associated with the given template, emitting a diagnostic if they do
  /// not.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateArgs The converted, canonicalized template arguments.
  ///
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  ///
  /// \returns true if the constraints are not satisfied or could not be
  /// checked for satisfaction, false if the constraints are satisfied.
  bool EnsureTemplateArgumentListConstraints(
      TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
      SourceRange TemplateIDRange);

  /// \brief Emit diagnostics explaining why a constraint expression was
  /// deemed unsatisfied.
  /// \param First whether this is the first time an unsatisfied constraint
  /// is diagnosed for this error.
  void
  DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
                                bool First = true);

  /// \brief Emit diagnostics explaining why a constraint expression was
  /// deemed unsatisfied.
  void
  DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
                                bool First = true);

  /// \brief Emit diagnostics explaining why a constraint expression was
  /// deemed unsatisfied because it was ill-formed.
  void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
                                              StringRef Diagnostic);

  void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
                                               SourceLocation New);
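  // Editor's illustrative sketch (hypothetical function): an unsatisfied
  // constraint that CheckConstraintSatisfaction records and
  // DiagnoseUnsatisfiedConstraint subsequently explains:
  //
  //   template<typename T> requires std::is_integral_v<T>
  //   void h(T);
  //
  //   h(42);      // satisfied: T = int
  //   // h(1.5);  // error: 'std::is_integral_v<double>' evaluated to false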
  // ParseObjCStringLiteral - Parse Objective-C string literals.
  ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                    ArrayRef<Expr *> Strings);

  ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

  /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
  /// numeric literal expression. Type of the expression will be "NSNumber *"
  /// or "id" if NSNumber is unavailable.
  ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
  ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc,
                                  SourceLocation ValueLoc, bool Value);
  ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

  /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
  /// '@' prefixed parenthesized expression. The type of the expression will
  /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the
  /// type of ValueType, which is allowed to be a built-in numeric type,
  /// "char *", "const char *" or C structure with attribute 'objc_boxable'.
  ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

  ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                          Expr *IndexExpr,
                                          ObjCMethodDecl *getterMethod,
                                          ObjCMethodDecl *setterMethod);

  ExprResult BuildObjCDictionaryLiteral(
      SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements);

  ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                       TypeSourceInfo *EncodedTypeInfo,
                                       SourceLocation RParenLoc);
  ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                    CXXConversionDecl *Method,
                                    bool HadMultipleCandidates);

  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                       SourceLocation EncodeLoc,
                                       SourceLocation LParenLoc, ParsedType Ty,
                                       SourceLocation RParenLoc);

  /// ParseObjCSelectorExpression - Build selector expression for \@selector
  ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc,
                                         SourceLocation SelLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation RParenLoc,
                                         bool WarnMultipleSelectors);

  /// ParseObjCProtocolExpression - Build protocol expression for \@protocol
  ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
                                         SourceLocation AtLoc,
                                         SourceLocation ProtoLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation ProtoIdLoc,
                                         SourceLocation RParenLoc);

  //===--------------------------------------------------------------------===//
  // C++ Declarations
  //
  Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
                                       Expr *LangStr,
                                       SourceLocation LBraceLoc);
  Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec,
                                        SourceLocation RBraceLoc);

  //===--------------------------------------------------------------------===//
  // C++ Classes
  //
  CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
  bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                          const CXXScopeSpec *SS = nullptr);
  bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

  bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                            SourceLocation ColonLoc,
                            const ParsedAttributesView &Attrs);

  NamedDecl *ActOnCXXMemberDeclarator(
      Scope *S, AccessSpecifier AS, Declarator &D,
      MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth,
      const VirtSpecifiers &VS, InClassInitStyle InitStyle);

  void ActOnStartCXXInClassMemberInitializer();
  void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                              SourceLocation EqualLoc,
                                              Expr *Init);

  MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    SourceLocation LParenLoc,
                                    ArrayRef<Expr *> Args,
                                    SourceLocation RParenLoc,
                                    SourceLocation EllipsisLoc);

  MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    Expr *InitList,
                                    SourceLocation EllipsisLoc);

  MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. 
  void inferGslPointerAttribute(NamedDecl *ND,
                                CXXRecordDecl *UnderlyingRecord);

  /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
  void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

  /// Add [[gsl::Pointer]] attributes for std:: types.
  void inferGslPointerAttribute(TypedefNameDecl *TD);

  void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
  /// conditions that are needed for the attribute to have an effect.
  void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

  void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                         Decl *TagDecl, SourceLocation LBrac,
                                         SourceLocation RBrac,
                                         const ParsedAttributesView &AttrList);
  void ActOnFinishCXXMemberDecls();
  void ActOnFinishCXXNonNestedClass();

  void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
  unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
  void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
  void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnFinishDelayedMemberInitializers(Decl *Record);
  void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                                CachedTokens &Toks);
  void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
  bool IsInsideALocalClassWithinATemplateFunction();

  Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr,
                                     Expr *AssertMessageExpr,
                                     SourceLocation RParenLoc);
  Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr,
                                     StringLiteral *AssertMessageExpr,
                                     SourceLocation RParenLoc, bool Failed);

  FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                  SourceLocation FriendLoc,
                                  TypeSourceInfo *TSInfo);
  Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                            MultiTemplateParamsArg TemplateParams);
  NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                     MultiTemplateParamsArg TemplateParams);

  QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                      StorageClass &SC);
  void CheckConstructor(CXXConstructorDecl *Constructor);
  QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                     StorageClass &SC);
  bool CheckDestructor(CXXDestructorDecl *Destructor);
  void CheckConversionDeclarator(Declarator &D, QualType &R,
                                 StorageClass &SC);
  Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
  void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                     StorageClass &SC);
  void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

  void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

  bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
                                             CXXSpecialMember CSM);
  void CheckDelayedMemberExceptionSpecs();

  bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
                                          DefaultedComparisonKind DCK);
  void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
                                         FunctionDecl *Spaceship);
  void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
                                 DefaultedComparisonKind DCK);

  //===--------------------------------------------------------------------===//
  // C++ Derived Classes
  //

  /// ActOnBaseSpecifier - Parsed a base specifier
  CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                       SourceRange SpecifierRange,
                                       bool Virtual, AccessSpecifier Access,
                                       TypeSourceInfo *TInfo,
                                       SourceLocation EllipsisLoc);

  BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, 
                                        SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        unsigned TagSpec,
                                        SourceLocation KWLoc, CXXScopeSpec &SS,
                                        IdentifierInfo *Name,
                                        SourceLocation NameLoc,
                                        const ParsedAttributesView &Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
      TemplateDecl *Template, SourceLocation TemplateLoc,
      SourceLocation RAngleLoc, Decl *Param,
      SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);

  /// Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,
    /// The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,
    /// The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \param ConstraintsNotSatisfied If provided, and an error occurred, will
  /// receive true if the cause for the error is the associated constraints
  /// of the template not being satisfied by the template arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. 
/// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); 
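  // Editor's illustrative sketch (hypothetical concept): the requirement
  // kinds built by the ActOn*Requirement callbacks above, in source form:
  //
  //   template<typename T>
  //   concept Example = requires(T a, T b) {
  //     a + b;                                  // simple requirement
  //     typename T::value_type;                 // type requirement
  //     { a + b } noexcept -> std::same_as<T>;  // compound requirement
  //     requires sizeof(T) >= 4;                // nested requirement
  //   };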
  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in
  /// this location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,
    /// The base type of a class type.
    UPPC_BaseType,
    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,
    /// The type of a data member.
    UPPC_DataMemberType,
    /// The size of a bit-field.
    UPPC_BitFieldWidth,
    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,
    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,
    /// The enumerator value.
    UPPC_EnumeratorValue,
    /// A using declaration.
    UPPC_UsingDeclaration,
    /// A friend declaration.
    UPPC_FriendDeclaration,
    /// A declaration qualifier.
    UPPC_DeclarationQualifier,
    /// An initializer.
    UPPC_Initializer,
    /// A default argument.
    UPPC_DefaultArgument,
    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,
    /// The type of an exception.
    UPPC_ExceptionType,
    /// Partial specialization.
    UPPC_PartialSpecialization,
    /// Microsoft __if_exists.
    UPPC_IfExists,
    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,
    /// Lambda expression.
    UPPC_Lambda,
    /// Block expression.
    UPPC_Block,
    /// A type constraint.
    UPPC_TypeConstraint
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(
      SourceLocation Loc, UnexpandedParameterPackContext UPPC,
      ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(
      Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

  /// If the given nested-name-specifier contains an unexpanded
  /// parameter pack, diagnose the error.
  ///
  /// \param SS The nested-name-specifier that is being checked for
  /// unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                       UnexpandedParameterPackContext UPPC);
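  // Editor's illustrative sketch (hypothetical functions): an expanded vs.
  // unexpanded parameter pack, the situation these diagnostics guard against:
  //
  //   template<typename ...Ts> void g(Ts ...args);
  //
  //   template<typename ...Ts> void f(Ts ...args) {
  //     g(args...);  // OK: pack expanded by the ellipsis
  //     // g(args);  // error: unexpanded parameter pack 'args'
  //   }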
  /// If the given name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param NameInfo The name (with source location information) that
  /// is being checked for unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given template name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The location of the template name.
  ///
  /// \param Template The template name that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                       TemplateName Template,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given template argument contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param Arg The template argument that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                       UnexpandedParameterPackContext UPPC);

  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      TemplateArgument Arg,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      TemplateArgumentLoc Arg,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param T The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param TL The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// nested-name-specifier.
  ///
  /// \param NNS The nested-name-specifier that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      NestedNameSpecifierLoc NNS,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// name.
  ///
  /// \param NameInfo The name that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      const DeclarationNameInfo &NameInfo,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Invoked when parsing a template argument followed by an
  /// ellipsis, which creates a pack expansion.
  ///
  /// \param Arg The template argument preceding the ellipsis, which
  /// may already be invalid.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                            SourceLocation EllipsisLoc);

  /// Invoked when parsing a type followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Type The type preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. 
/// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. 
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer
    /// list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// The deduced arguments did not satisfy the constraints associated
    /// with the template.
    TDK_ConstraintsNotSatisfied,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);
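  // Editor's illustrative sketch (hypothetical function): call-site
  // deductions mapping onto the TemplateDeductionResult values above:
  //
  //   template<typename T> T biggest(T a, T b) { return a < b ? b : a; }
  //
  //   biggest(2, 5);       // TDK_Success: T deduced as int
  //   // biggest(2, 5.0);  // TDK_Inconsistent: T = int vs. T = double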
  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                    unsigned ArgIdx, QualType OriginalArgType)
        : OriginalParamType(OriginalParamType),
          DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) {}

    QualType OriginalParamType;
    bool DecomposedParam;
    unsigned ArgIdx;
    QualType OriginalArgType;
  };

  TemplateDeductionResult FinishTemplateArgumentDeduction(
      FunctionTemplateDecl *FunctionTemplate,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
      bool PartialOverloading = false,
      llvm::function_ref<bool()> CheckNonDependent = [] { return false; });

  TemplateDeductionResult DeduceTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
      bool PartialOverloading,
      llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType, CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

  /// Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo *SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);

  /// Completely replace the \c auto in \p TypeWithAuto by
  /// \p Replacement. This does not retain any \c auto type sugar.
  QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);

  /// Result type of DeduceAutoType.
  enum DeduceAutoResult {
    DAR_Succeeded,
    DAR_Failed,
    DAR_FailedAlreadyDiagnosed
  };

  DeduceAutoResult
  DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
                 QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None,
                 bool IgnoreConstraints = false);
  DeduceAutoResult
  DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None,
                 bool IgnoreConstraints = false);
  void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
  bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                        bool Diagnose = true);

  /// Declare implicit deduction guides for a class template if we've
  /// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
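      /// For example (illustrative only), writing S<int> in
      /// \code
      ///   template <typename T, typename U = T *> struct S {};
      ///   S<int> s; // instantiates the default argument: U = int *
      /// \endcode
      /// triggers this kind of synthesis for the parameter U.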
      DefaultTemplateArgumentInstantiation,

      /// We are instantiating a default argument for a function.
      /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provides the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are computing the exception specification for a defaulted special
      /// member function.
      ExceptionSpecEvaluation,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation,

      /// We are instantiating a requirement of a requires expression.
      RequirementInstantiation,

      /// We are checking the satisfaction of a nested requirement of a
      /// requires expression.
      NestedRequirementConstraintsCheck,

      /// We are declaring an implicit special member function.
      DeclaringSpecialMember,

      /// We are declaring an implicit 'operator==' for a defaulted
      /// 'operator<=>'.
      DeclaringImplicitEqualityComparison,

      /// We are defining a synthesized function (such as a defaulted special
      /// member).
      DefiningSynthesizedFunction,

      /// We are checking the constraints associated with a constrained entity
      /// or the constraint expression of a concept. This includes the checks
      /// that atomic constraints have the type 'bool' and that they can be
      /// constant evaluated.
      ConstraintsCheck,

      /// We are substituting template arguments into a constraint expression.
      ConstraintSubstitution,

      /// We are normalizing a constraint expression.
      ConstraintNormalization,

      /// We are substituting into the parameter mapping of an atomic
      /// constraint during normalization.
      ParameterMappingSubstitution,

      /// We are rewriting a comparison operator in terms of an operator<=>.
      RewritingOperatorAsSpaceship,

      /// Added for template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it was not already instantiated).
      Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that caused
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionRAII;

  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
      SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and \c isInvalid() returns true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
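    /// A typical (sketched) call site during deduction, assuming a Sema &S,
    /// deduced arguments in DeducedArgs, and a TemplateDeductionInfo Info;
    /// the local names here are placeholders, not part of this interface:
    /// \code
    ///   InstantiatingTemplate Inst(
    ///       S, Info.getLocation(), FunctionTemplate, DeducedArgs,
    ///       CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info);
    ///   if (Inst.isInvalid())
    ///     return TDK_InstantiationDepth; // gave up: nested too deeply
    /// \endcode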
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ConstraintNormalization, NamedDecl *Template,
                          SourceRange InstantiationRange);

    struct ParameterMappingSubstitution {};
    /// \brief Note that we are substituting into the parameter mapping of an
    /// atomic constraint during constraint normalization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParameterMappingSubstitution, NamedDecl *Template,
                          SourceRange InstantiationRange);

    /// \brief Note that we are substituting template arguments into a part of
    /// a requirement of a requires expression.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::Requirement *Req,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// \brief Note that we are checking the satisfaction of the constraint
    /// expression inside of a nested requirement.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::NestedRequirement *Req, ConstraintsCheck,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum number of
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
    bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

  private:
    Sema &SemaRef;
    bool Invalid;
    bool AlreadyInstantiating;
    bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                                 SourceRange InstantiationRange);

    InstantiatingTemplate(Sema &SemaRef,
                          CodeSynthesisContext::SynthesisKind Kind,
                          SourceLocation PointOfInstantiation,
                          SourceRange InstantiationRange, Decl *Entity,
                          NamedDecl *Template = nullptr,
                          ArrayRef<TemplateArgument> TemplateArgs = None,
                          sema::TemplateDeductionInfo *DeductionInfo = nullptr);

    InstantiatingTemplate(const InstantiatingTemplate&) = delete;
    InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
  };

  void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
  void popCodeSynthesisContext();

  /// Determine whether we are currently performing template instantiation.
  bool inTemplateInstantiation() const {
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();

  /// Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
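  /// For example (illustrative only), both operands below are unevaluated:
  /// \code
  ///   int f();
  ///   auto n = sizeof(f());   // f() is never called
  ///   decltype(f()) x = 0;    // likewise
  /// \endcode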
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;
    bool PrevLastDiagnosticIgnored;

  public:
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
        : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
          PrevInNonInstantiationSFINAEContext(
              SemaRef.InNonInstantiationSFINAEContext),
          PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
          PrevLastDiagnosticIgnored(
              SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    ~SFINAETrap() {
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext =
          PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
      SemaRef.getDiagnostics().setLastDiagnosticIgnored(
          PrevLastDiagnosticIgnored);
    }

    /// Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;

  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;

  /// An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
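  /// For example (illustrative only):
  /// \code
  ///   template <typename T> T twice(T v) { return v + v; }
  ///   int n = twice(21); // "used" here; the definition of twice<int>
  ///                      // is instantiated later, at the end of the TU
  /// \endcode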
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
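    /// A sketched use (not from the original header; assumes an
    /// ExtProtoInfo named EPI and a parameter count NumParams in scope):
    /// \code
    ///   ExtParameterInfoBuilder ParamInfos;
    ///   ParamInfos.set(1, FunctionProtoType::ExtParameterInfo()
    ///                         .withIsNoEscape(true));
    ///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
    /// \endcode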
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
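  /// The language-level behavior being modeled (illustrative example):
  /// \code
  ///   struct X {
  ///     int a;
  ///     auto operator<=>(const X &) const = default;
  ///   };
  ///   bool b = X{1} == X{1}; // uses the implicitly declared operator==
  /// \endcode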
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, 
const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const 
      ObjCList<ObjCProtocolDecl> &PList);

  Decl *ActOnStartProtocolInterface(
      SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
      SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
      unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
      SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryInterface(
      SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                      IdentifierInfo *ClassName,
                                      SourceLocation ClassLoc,
                                      IdentifierInfo *SuperClassname,
                                      SourceLocation SuperClassLoc,
                                      const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc,
                                         const ParsedAttributesView &AttrList);

  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  DeclGroupPtrTy ActOnForwardClassDeclaration(
      SourceLocation Loc, IdentifierInfo **IdentList,
      SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
      unsigned NumElts);

  DeclGroupPtrTy
  ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                  ArrayRef<IdentifierLocPair> IdentList,
                                  const ParsedAttributesView &attrList);

  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               ArrayRef<IdentifierLocPair> ProtocolId,
                               SmallVectorImpl<Decl *> &Protocols);

  void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                    SourceLocation ProtocolLoc,
                                    IdentifierInfo *TypeArgId,
                                    SourceLocation TypeArgLoc,
                                    bool SelectProtocolFirst = false);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
      Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
      ArrayRef<IdentifierInfo *> identifiers,
      ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
      SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
      SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
      SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
      bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
      SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
      ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
      Scope *S, SourceLocation Loc, ParsedType BaseType,
      SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
      SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
      ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
      SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);

  /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
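  /// For example (illustrative only), this entry point is reached for each
  /// directive in:
  /// \code
  ///   #pragma GCC visibility push(hidden)
  ///   void internal_helper();
  ///   #pragma GCC visibility pop
  /// \endcode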
  void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                         SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

  /// ActOnPragmaFenvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

  /// Called to set rounding mode for floating point operations.
  void setRoundingMode(LangOptions::FPRoundingModeKind);

  /// Called to set exception behavior for floating point operations.
  void setExceptionMode(LangOptions::FPExceptionModeKind);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
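  // Illustrative only: the pragma handled above brackets declarations like
  // so (On is false for "off" and true for "on"):
  //
  //   #pragma clang optimize off
  //   void not_optimized();
  //   #pragma clang optimize on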
  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                      bool IsPackExpansion);
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                      bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                            Expr *OE);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *ParamExpr);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                           Expr *MaxThreads, Expr *MinBlocks);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                   bool InInstantiation = false);

  void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                           ParameterABI ABI);

  enum class RetainOwnershipKind {NS, CF, OS};
  void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                      Expr *Min, Expr *Max);

  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. /// Checks if the variant/multiversion functions are compatible. 
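  /// An illustrative multiversioned pair this check guards (hypothetical
  /// example, not from this header):
  /// \code
  ///   int foo(void) __attribute__((target("default")));
  ///   int foo(void) __attribute__((target("avx2")));
  /// \endcode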
  bool areMultiversionVariantFunctionsCompatible(
      const FunctionDecl *OldFD, const FunctionDecl *NewFD,
      const PartialDiagnostic &NoProtoDiagID,
      const PartialDiagnosticAt &NoteCausedDiagIDAt,
      const PartialDiagnosticAt &NoSupportDiagIDAt,
      const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
      bool ConstexprSupported, bool CLinkageMayDiffer);

  /// Tries to capture a lambda's captured variables in the OpenMP region
  /// before the original lambda is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a D should be captured by
  /// reference.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  /// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                             unsigned OpenMPCaptureLevel) const;

  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction, etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// If the current region is a loop-based region, mark the start of the loop
  /// construct.
  void startOpenMPLoop();

  /// If the current region is a range loop-based region, mark the start of
  /// the loop construct.
  void startOpenMPCXXRangeFor();

  /// Check if the specified variable is used in a 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map,
  /// etc.) for \p FD based on the DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by the 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
                                  unsigned CaptureLevel) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of a new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);

  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);

  /// End analysis of clauses.
  void EndOpenMPClause();

  /// Called on end of a data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark the loop control variable, used in \p Init for loop initialization,
  /// as private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);

  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);

  /// Builds a new OMPThreadPrivateDecl and checks its correctness.
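  /// For illustration, this is the kind of directive that gets checked here:
  /// \code
  ///   static int Counter;
  ///   #pragma omp threadprivate(Counter)
  /// \endcode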
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);

  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);

  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                              ArrayRef<OMPClause *> ClauseList);

  /// Check restrictions on the Requires directive.
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);

  /// Check if the specified type is allowed to be used in an 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);

  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);

  /// Initialize declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);

  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);

  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);

  /// Check if the specified type is allowed to be used in an 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);

  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);

  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);

  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);

  /// Called on the start of a target region, i.e. '#pragma omp declare
  /// target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);

  /// Called at the end of a target region, i.e. '#pragma omp end declare
  /// target'.
  void ActOnFinishOpenMPDeclareTargetDirective();

  /// Searches for the provided declaration name for an OpenMP declare target
  /// directive.
  NamedDecl *
  lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                const DeclarationNameInfo &Id,
                                NamedDeclSetType &SameDirectiveDecls);

  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    OMPDeclareTargetDeclAttr::DevTypeTy DT);

  /// Check declaration inside target region.
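  /// For illustration, declarations are validated against regions such as:
  /// \code
  ///   #pragma omp declare target
  ///   int DeviceGlobal;   // made available on the device
  ///   #pragma omp end declare target
  /// \endcode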
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. 
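  /// An illustrative directive that reaches this callback:
  /// \code
  ///   #pragma omp parallel for
  ///   for (int I = 0; I < N; ++I)
  ///     A[I] = B[I] + C[I];   // N, A, B, C are placeholders
  /// \endcode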
StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
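  /// Illustrative usage, offloading the parallel region to a device:
  /// \code
  ///   #pragma omp target parallel
  ///   {
  ///     // parallel work executed on the target device
  ///   }
  /// \endcode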
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
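  /// Illustrative usage; 'distribute' constructs are nested in a 'teams'
  /// region:
  /// \code
  ///   #pragma omp teams
  ///   #pragma omp distribute parallel for
  ///   for (int I = 0; I < N; ++I)
  ///     A[I] *= 2;   // N and A are placeholders
  /// \endcode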
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
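  /// Illustrative usage of the combined construct:
  /// \code
  ///   #pragma omp target teams distribute parallel for
  ///   for (int I = 0; I < N; ++I)
  ///     A[I] = B[I] * S;   // placeholders for illustration
  /// \endcode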
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute parallel for
  /// simd' after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);

  /// Checks that the specified declaration matches requirements for the
  /// linear decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Checks the variant function and the original function of a '\#pragma omp
  /// declare variant' directive after parsing of the associated
  /// method/function.
  /// \param DG Function declaration to which the declare variant directive is
  /// applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The trait info object representing the match clause.
  /// \returns None if the function and the variant function are not
  /// compatible with the pragma; otherwise, the pair of the original function
  /// and the variant reference expression.
  Optional<std::pair<FunctionDecl *, Expr *>>
  checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                    OMPTraitInfo &TI, SourceRange SR);

  /// Called on well-formed '\#pragma omp declare variant' after parsing of
  /// the associated method/function.
  /// \param FD Function declaration to which the declare variant directive is
  /// applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The context traits associated with the function variant.
  void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
                                          OMPTraitInfo &TI, SourceRange SR);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);

  /// Called on well-formed 'final' clause.
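  /// Illustrative clause usage:
  /// \code
  ///   #pragma omp task final(Depth > 8)   // 'Depth' is a placeholder
  ///   Work();                             // placeholder call
  /// \endcode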
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. 
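  /// Illustrative usage on an atomic directive:
  /// \code
  ///   #pragma omp atomic read
  ///   V = X;   // 'V' and 'X' are placeholder variables
  /// \endcode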
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'acq_rel' clause.
  OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'acquire' clause.
  OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'release' clause.
  OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'relaxed' clause.
  OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);

  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);

  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
      SourceLocation DepLinMapLastLoc);

  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);

  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);

  /// Called on well-formed 'lastprivate' clause.
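  /// Illustrative clause usage:
  /// \code
  ///   #pragma omp parallel for lastprivate(Last)
  ///   for (int I = 0; I < N; ++I)
  ///     Last = I;   // 'Last' and 'N' are placeholders
  /// \endcode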
OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. 
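  /// Illustrative clause usage:
  /// \code
  ///   #pragma omp target teams num_teams(8)
  ///   { /* teams region */ }
  /// \endcode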
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  /// Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);

  /// Called on well-formed 'to' clause.
  OMPClause *
  ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId,
                      const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'from' clause.
  OMPClause *
  ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
                        CXXScopeSpec &MapperIdScopeSpec,
                        DeclarationNameInfo &MapperId,
                        const OMPVarListLocTy &Locs,
                        ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);

  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);

  /// Called on well-formed 'nontemporal' clause.
  OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// The value kind of the result is given by \p VK.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
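  // A minimal illustration of both effects:
  //   short S; int A[4];
  //   S + 0;   // 'S' is promoted to 'int'
  //   A + 0;   // 'A' decays to 'int *'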
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion.
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to
  // a vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collect argument expressions for the various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  /// Context in which we're performing a usual arithmetic conversion.
  enum ArithConvKind {
    /// An arithmetic operation.
    ACK_Arithmetic,
    /// A bitwise operation.
    ACK_BitwiseOp,
    /// A comparison.
    ACK_Comparison,
    /// A conditional (?:) operator.
    ACK_Conditional,
    /// A compound assignment expression.
    ACK_CompAssign,
  };

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found.
  // The client is responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc, ArithConvKind ACK);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types, which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ, e.g. char ** -> const char **, but we accept them as
    /// an extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright; it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if
  /// the conversion was invalid or false if the conversion was accepted.
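  /// For instance (illustrative, C mode), 'int *P = 42;' classifies as
  /// IntToPointer and is diagnosed here as a warning rather than rejected:
  /// \code
  ///   int *P = 42;   // warning: incompatible integer to pointer conversion
  /// \endcode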
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
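  /// Illustrative operands these routines check (GCC/Clang vector extension):
  /// \code
  ///   typedef int v4si __attribute__((vector_size(16)));
  ///   v4si A, B;
  ///   v4si C = A + B;   // element-wise addition, type-checked here
  /// \endcode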
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  // Fake up a scoped enumeration that still contextually converts to bool.
  struct ReferenceConversionsScope {
    /// The conversions that would be performed on an lvalue of type T2 when
    /// binding a reference of type T1 to it, as determined when evaluating
    /// whether T1 is reference-compatible with T2.
    enum ReferenceConversions {
      Qualification = 0x1,
      NestedQualification = 0x2,
      Function = 0x4,
      DerivedToBase = 0x8,
      ObjC = 0x10,
      ObjCLifetime = 0x20,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
    };
  };
  using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               ReferenceConversions *Conv = nullptr);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size, or vectors and the element type of that vector.
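  // An illustrative element-type (splat) cast, using the ext_vector_type
  // extension:
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   float4 V = (float4)1.0f;   // splats 1.0f across all four lanes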
// returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. 
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                               SourceLocation>>
    DeviceCallGraph;

/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in CUDA device code.
///   if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in NVPTX device code.
///   if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in NVPTX device code.
///   if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                      bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
  return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}

// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_WrongSide,  // Calls from host-device to host or device
                  // function that do not match current compilation
                  // mode.
  CFP_HostDevice, // Any calls to host/device functions.
  CFP_SameSide,   // Calls from host-device to host or device
                  // function matching current compilation mode.
  CFP_Native,     // host-to-host or device-to-device calls.
};

/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
///        nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                        bool IsConstexpr = false);

void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                 Expr *Init);

/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
                                DeclarationName FieldName,
                                const CXXRecordDecl *RD,
                                bool DeclIsField = true);

/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);

/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action);

/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
  ForThisTarget = 0,
  VariadicFunction,
  ConstructorDestructor,
  BuiltinFunction
};
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context,
                         Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
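One helper worth dwelling on is Sema::TooManyArguments above: during code
completion the cursor may sit just after a comma, so the argument being typed
has to be counted even though it is not in the argument list yet. The
following standalone C sketch (hypothetical names; not part of clang)
restates that logic so it can be exercised in isolation:

#include <assert.h>
#include <stddef.h>

/* Mirror of the TooManyArguments logic: when partial_overloading is set
 * and at least one argument has been seen, the in-flight argument after
 * the trailing comma counts as one more. */
static int too_many_arguments(size_t num_params, size_t num_args,
                              int partial_overloading)
{
  if (num_args > 0 && partial_overloading)
    return num_args + 1 > num_params;
  return num_args > num_params;
}

int main(void)
{
  assert(!too_many_arguments(2, 2, 0)); /* f(a, b) against 2 params: fits  */
  assert( too_many_arguments(2, 2, 1)); /* completing "f(a, b,": overflow  */
  assert(!too_many_arguments(3, 2, 1)); /* room for the in-flight argument */
  return 0;
}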
thd_info.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "thd_info.h" /****************************************************************************** * PRIVATE FUNCTIONS *****************************************************************************/ /** * @brief Perform a parallel SUM reduction. * * @param thds The thread structure we are using in the reduction. * @param scratchid Which scratch array to reduce. * @param nelems How many elements in the scratch array. */ static inline void p_reduce_sum( thd_info * const thds, idx_t const scratchid, idx_t const nelems) { int const tid = splatt_omp_get_thread_num(); int const nthreads = splatt_omp_get_num_threads(); val_t * const myvals = (val_t *) thds[tid].scratch[scratchid]; int half = nthreads / 2; while(half > 0) { if(tid < half && tid + half < nthreads) { val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] += target[i]; } } #pragma omp barrier /* check for odd number */ #pragma omp master if(half > 1 && half % 2 == 1) { val_t const * const last = (val_t *) thds[half-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] += last[i]; } } /* next iteration */ half /= 2; } /* account for odd thread at end */ #pragma omp master { if(nthreads % 2 == 1) { val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] += last[i]; } } } #pragma omp barrier } /** * @brief Perform a parallel MAX reduction. * * @param thds The thread structure we are using in the reduction. * @param scratchid Which scratch array to reduce. * @param nelems How many elements in the scratch array. 
*/ static inline void p_reduce_max( thd_info * const thds, idx_t const scratchid, idx_t const nelems) { int const tid = splatt_omp_get_thread_num(); int const nthreads = splatt_omp_get_num_threads(); val_t * const myvals = (val_t *) thds[tid].scratch[scratchid]; int half = nthreads / 2; while(half > 0) { if(tid < half && tid + half < nthreads) { val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] = SS_MAX(myvals[i], target[i]); } } #pragma omp barrier /* check for odd number */ #pragma omp master if(half > 1 && half % 2 == 1) { val_t const * const last = (val_t *) thds[half-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] = SS_MAX(myvals[i], last[i]); } } /* next iteration */ half /= 2; } /* account for odd thread at end */ #pragma omp master { if(nthreads % 2 == 1) { val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid]; for(idx_t i=0; i < nelems; ++i) { myvals[i] = SS_MAX(myvals[i], last[i]); } } } #pragma omp barrier } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void thd_reduce( thd_info * const thds, idx_t const scratchid, idx_t const nelems, splatt_reduce_type const which) { if(splatt_omp_get_num_threads() == 1) { return; } /* just to be safe in case any thread data is being copied */ #pragma omp barrier switch(which) { case REDUCE_SUM: p_reduce_sum(thds, scratchid, nelems); break; case REDUCE_MAX: p_reduce_max(thds, scratchid, nelems); break; default: fprintf(stderr, "SPLATT: thd_reduce supports SUM and MAX only.\n"); abort(); } } thd_info * thd_init( idx_t const nthreads, idx_t const nscratch, ...) { thd_info * thds = (thd_info *) malloc(nthreads * sizeof(thd_info)); for(idx_t t=0; t < nthreads; ++t) { timer_reset(&thds[t].ttime); thds[t].nscratch = nscratch; thds[t].scratch = (void **) malloc(nscratch * sizeof(void*)); } va_list args; va_start(args, nscratch); for(idx_t s=0; s < nscratch; ++s) { idx_t const bytes = va_arg(args, idx_t); for(idx_t t=0; t < nthreads; ++t) { thds[t].scratch[s] = (void *) malloc(bytes); memset(thds[t].scratch[s], 0, bytes); } } va_end(args); return thds; } void thd_times( thd_info * thds, idx_t const nthreads) { for(idx_t t=0; t < nthreads; ++t) { printf(" thread: %"SPLATT_PF_IDX" %0.3fs\n", t, thds[t].ttime.seconds); } } void thd_time_stats( thd_info * thds, idx_t const nthreads) { double max_time = 0.; double avg_time = 0.; for(idx_t t=0; t < nthreads; ++t) { avg_time += thds[t].ttime.seconds; max_time = SS_MAX(max_time, thds[t].ttime.seconds); } avg_time /= nthreads; double const imbal = (max_time - avg_time) / max_time; printf(" avg: %0.3fs max: %0.3fs (%0.1f%% imbalance)\n", avg_time, max_time, 100. * imbal); } void thd_reset( thd_info * thds, idx_t const nthreads) { for(idx_t t=0; t < nthreads; ++t) { timer_reset(&thds[t].ttime); } } void thd_free( thd_info * thds, idx_t const nthreads) { for(idx_t t=0; t < nthreads; ++t) { for(idx_t s=0; s < thds[t].nscratch; ++s) { free(thds[t].scratch[s]); } free(thds[t].scratch); } free(thds); }
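/*
 * Hypothetical usage sketch (not part of thd_info.c): how the API above
 * fits together. thd_init() allocates one zeroed scratch array per thread;
 * each thread fills its own copy inside a parallel region; thd_reduce()
 * then folds all copies into thread 0's scratch with a tree reduction.
 * With 5 threads, for example, the SUM reduction does t0+=t2 and t1+=t3
 * (half = 2), then t0+=t1 (half = 1), and finally the master picks up the
 * odd thread at the end: t0+=t4.
 */
#include "thd_info.h"

#define NELEMS 128

static void example_thd_reduce(idx_t const nthreads)
{
  /* one scratch array of NELEMS vals per thread, zero-initialized */
  thd_info * thds = thd_init(nthreads, 1, (idx_t) (NELEMS * sizeof(val_t)));

  #pragma omp parallel num_threads(nthreads)
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const mine = (val_t *) thds[tid].scratch[0];

    for(idx_t i=0; i < NELEMS; ++i) {
      mine[i] += (val_t) (tid + 1);   /* per-thread partial result */
    }

    /* every thread must call this; afterwards thread 0's scratch[0]
     * holds the element-wise sum over all threads' copies */
    thd_reduce(thds, 0, NELEMS, REDUCE_SUM);
  }

  thd_free(thds, nthreads);
}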
gmm.c
/** @file gmm.c
 ** @brief Gaussian Mixture Models - Implementation
 ** @author David Novotny
 ** @author Andrea Vedaldi
 **/

/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.

This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/

/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm Gaussian Mixture Models (GMM)
@author David Novotny
@author Andrea Vedaldi
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

@ref gmm.h is an implementation of *Gaussian Mixture Models* (GMMs).
The main functionality provided by this module is learning GMMs from
data by maximum likelihood. Model optimization uses the Expectation
Maximization (EM) algorithm @cite{dempster77maximum}. The
implementation supports @c float or @c double data types, is
parallelized, and is tuned to work reliably and effectively on
datasets of visual features. Stability is obtained in part by
regularizing and restricting the parameters of the GMM.

@ref gmm-starting demonstrates how to use the C API to learn a GMM
from training data. For further details refer to:

- @subpage gmm-fundamentals

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

In order to use @ref gmm.h to learn a GMM from training data, create
a new ::VlGMM object instance, set the parameters as desired, and run
the training code. The following example learns @c numClusters
Gaussian components from @c numData vectors of dimension @c dimension
and storage class @c float using at most 100 EM iterations:

@code
VlGMM * gmm ;
float * means ;
float * covariances ;
float * priors ;
float * posteriors ;

double loglikelihood ;

// create a new instance of a GMM object for float data
gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;

// set the maximum number of EM iterations to 100
vl_gmm_set_max_num_iterations (gmm, 100) ;

// set the initialization to random selection
vl_gmm_set_initialization (gmm, VlGMMRand) ;

// cluster the data, i.e. learn the GMM
vl_gmm_cluster (gmm, data, numData) ;

// get the means, covariances, and priors of the GMM
means = vl_gmm_get_means(gmm) ;
covariances = vl_gmm_get_covariances(gmm) ;
priors = vl_gmm_get_priors(gmm) ;

// get loglikelihood of the estimated GMM
loglikelihood = vl_gmm_get_loglikelihood(gmm) ;

// get the soft assignments of the data points to each cluster
posteriors = vl_gmm_get_posteriors(gmm) ;
@endcode

@note ::VlGMM assumes that the covariance matrices of the GMM are
diagonal. This significantly reduces the number of parameters to
learn and is usually an acceptable compromise in vision applications.
If the data is significantly correlated, it can be beneficial to
de-correlate it by PCA rotation or projection in pre-processing.

::vl_gmm_get_loglikelihood is used to get the final loglikelihood of
the estimated mixture, ::vl_gmm_get_means and ::vl_gmm_get_covariances
to obtain the means and the diagonals of the covariance matrices of
the estimated Gaussian modes, and ::vl_gmm_get_posteriors to get the
posterior probabilities that a given point is associated to each of
the modes (soft assignments).

The learning algorithm, which uses EM, finds a local optimum of the
objective function. Therefore the initialization is crucial in
obtaining a good model, measured in terms of the final loglikelihood.
::VlGMM supports a few methods (use ::vl_gmm_set_initialization to
choose one) as follows:

Method                | ::VlGMMInitialization enumeration | Description
----------------------|-----------------------------------|-------------------------------------------------
Random initialization | ::VlGMMRand                       | Random initialization of the mixture parameters
KMeans                | ::VlGMMKMeans                     | Initialization of the mixture parameters using ::VlKMeans
Custom                | ::VlGMMCustom                     | User specified initialization

Note that in the case of ::VlGMMKMeans initialization, an object of
type ::VlKMeans must be created and passed to the ::VlGMM instance
(see @ref kmeans for how to correctly set up this object).

When a user wants to use the ::VlGMMCustom method, the initial means,
covariances and priors have to be specified using the
::vl_gmm_set_means, ::vl_gmm_set_covariances and ::vl_gmm_set_priors
methods.
**/

/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm-fundamentals GMM fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

A *Gaussian Mixture Model* (GMM) is a mixture of $K$ multivariate
Gaussian distributions. In order to sample from a GMM, one samples
first the component index $k \in \{1,\dots,K\}$ with *prior
probability* $\pi_k$, and then samples the vector $\bx \in
\mathbb{R}^d$ from the $k$-th Gaussian distribution
$p(\bx|\mu_k,\Sigma_k)$. Here $\mu_k$ and $\Sigma_k$ are respectively
the *mean* and *covariance* of the distribution. The GMM is
completely specified by the parameters
$\Theta=\{\pi_k,\mu_k,\Sigma_k; k = 1,\dots,K\}$.

The density $p(\bx|\Theta)$ induced on the training data is obtained
by marginalizing the component selector $k$, obtaining
\[
p(\bx|\Theta)
= \sum_{k=1}^{K} \pi_k p( \bx |\mu_k,\Sigma_k),
\qquad
p( \bx |\mu_k,\Sigma_k)
=
\frac{1}{\sqrt{(2\pi)^d\det\Sigma_k}}
\exp\left[
-\frac{1}{2} (\bx-\mu_k)^\top\Sigma_k^{-1}(\bx-\mu_k)
\right].
\]
Learning a GMM to fit a dataset $X=(\bx_1, \dots, \bx_n)$ is usually
done by maximizing the log-likelihood of the data:
@f[
 \ell(\Theta;X)
 = E_{\bx\sim\hat p} [ \log p(\bx|\Theta) ]
 = \frac{1}{n}\sum_{i=1}^{n} \log \sum_{k=1}^{K} \pi_k p(\bx_i|\mu_k, \Sigma_k)
@f]
where $\hat p$ is the empirical distribution of the data. An
algorithm to solve this problem is introduced next.

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-em Learning a GMM by expectation maximization
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

The direct maximization of the log-likelihood function of a GMM is
difficult because the assignment of points to Gaussian modes is not
observable and, as such, must be treated as a latent variable.

Usually, GMMs are learned by using the *Expectation Maximization*
(EM) algorithm @cite{dempster77maximum}. Consider in general the
problem of estimating, by maximum likelihood, a distribution
$p(x|\Theta) = \int p(x,h|\Theta)\,dh$, where $x$ is a measurement,
$h$ is a *latent variable*, and $\Theta$ are the model parameters.
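In the GMM case this abstract notation instantiates directly: the
latent variable $h$ is the component index $k$, and the joint density
of a point and its assignment factorizes, using only the quantities
defined above, as
\[
p(\bx, k|\Theta) = \pi_k \, p(\bx|\mu_k,\Sigma_k),
\qquad
p(\bx|\Theta) = \sum_{k=1}^{K} p(\bx, k|\Theta).
\]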
By introducing an auxiliary distribution $q(h|x)$ on the latent variable, one can use Jensen's inequality to obtain the following lower bound on the log-likelihood: @f{align*} \ell(\Theta;X) = E_{x\sim\hat p} \log p(x|\Theta) &= E_{x\sim\hat p} \log \int p(x,h|\Theta) \,dh \\ &= E_{x\sim\hat p} \log \int \frac{p(x,h|\Theta)}{q(h|x)} q(h|x)\,dh \\ &\geq E_{x\sim\hat p} \int q(h|x) \log \frac{p(x,h|\Theta)}{q(h|x)}\,dh \\ &= E_{(x,h) \sim q(h|x) \hat p(x)} \log p(x,h|\Theta) - E_{(x,h) \sim q(h|x) \hat p(x)} \log q(h|x) @f} The first term of the last expression is the log-likelihood of the model where both $x$ and $h$ are observed and jointly distributed as $q(h|x)\hat p(x)$; the second term is the average entropy of the latent variable, which does not depend on $\Theta$. This lower bound is maximized and becomes tight by setting $q(h|x) = p(h|x,\Theta)$ to be the posterior distribution on the latent variable $h$ (given the current estimate of the parameters $\Theta$). In fact: \[ E_{x \sim \hat p} \log p(x|\Theta) = E_{(x,h) \sim p(h|x,\Theta) \hat p(x)}\left[ \log \frac{p(x,h|\Theta)}{p(h|x,\Theta)} \right] = E_{(x,h) \sim p(h|x,\Theta) \hat p(x)} [ \log p(x|\Theta) ] = \ell(\Theta;X). \] EM alternates between updating the latent variable auxiliary distribution $q(h|x) = p(h|x,\Theta_t)$ given the current estimate of the parameters $\Theta_t$ (*expectation step*), and updating the model parameters $\Theta_{t+1}$ by maximizing the log-likelihood lower bound derived above (*maximization step*). The simplification is that in the maximization step both $x$ and $h$ are now ``observed'' quantities. This procedure converges to a local optimum of the model log-likelihood. @subsection gmm-expectation-step Expectation step In the case of a GMM, the latent variables are the point-to-cluster assignments $k_i, i=1,\dots,n$, one for each of the $n$ data points. The auxiliary distribution $q(k_i|\bx_i) = q_{ik}$ is a matrix with $n \times K$ entries. Each row $q_{i,:}$ can be thought of as a vector of soft assignments of the data point $\bx_i$ to each of the Gaussian modes. Setting $q_{ik} = p(k_i | \bx_i, \Theta)$ yields \[ q_{ik} = \frac {\pi_k p(\bx_i|\mu_k,\Sigma_k)} {\sum_{l=1}^K \pi_l p(\bx_i|\mu_l,\Sigma_l)} \] where the Gaussian density $p(\bx_i|\mu_k,\Sigma_k)$ was given above. One important point to keep in mind when computing these probabilities is that the Gaussian densities may attain very small values and underflow in a vanilla implementation; computations are therefore best carried out in the logarithmic domain. Furthermore, the VLFeat GMM implementation restricts the covariance matrices to be diagonal. In this case the determinant of $\Sigma_k$ reduces to the product of its diagonal elements (so the log-determinant is the sum of their logarithms), and the inverse of $\Sigma_k$ is obtained by inverting the elements on the diagonal of the covariance matrix. @subsection gmm-maximization-step Maximization step The M step estimates the parameters of the Gaussian mixture components and the prior probabilities $\pi_k$ given the auxiliary distribution on the point-to-cluster assignments computed in the E step. Since all the variables are now ``observed'', the estimate is quite simple. For example, the mean $\mu_k$ of a Gaussian mode is obtained as the mean of the data points assigned to it (accounting for the strength of the soft assignments).
The other quantities are obtained in a similar manner, yielding: @f{align*} \mu_k &= { { \sum_{i=1}^n q_{ik} \bx_{i} } \over { \sum_{i=1}^n q_{ik} } }, \\ \Sigma_k &= { { \sum_{i=1}^n { q_{ik} (\bx_{i} - \mu_{k}) {(\bx_{i} - \mu_{k})}^T } } \over { \sum_{i=1}^n q_{ik} } }, \\ \pi_k &= { \sum_{i=1}^n { q_{ik} } \over { \sum_{i=1}^n \sum_{l=1}^K q_{il} } }. @f} <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section gmm-fundamentals-init Initialization algorithms <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The EM algorithm is a local optimization method. As such, the quality of the solution strongly depends on the quality of the initial values of the parameters (i.e. of the locations and shapes of the Gaussian modes). @ref gmm.h supports the following cluster initialization algorithms: - <b>Random data points</b> (::vl_gmm_init_with_rand_data). This method sets the means of the modes by sampling a corresponding number of data points at random, sets the covariance matrices of all the modes to the covariance of the entire dataset, and sets the prior probabilities of the Gaussian modes to be uniform. This initialization method is the fastest and simplest, but also the one most likely to end in a bad local minimum. - <b>KMeans initialization</b> (::vl_gmm_init_with_kmeans). This method uses KMeans to pre-cluster the points. It then sets the means and covariances of the Gaussian distributions to the sample means and covariances of each KMeans cluster, and sets the prior probabilities to be proportional to the mass of each cluster. In order to use this initialization method, a user can specify an instance of ::VlKMeans by using the function ::vl_gmm_set_kmeans_init_object, or let ::VlGMM create one automatically. Alternatively, one can manually specify a starting point (::vl_gmm_set_priors, ::vl_gmm_set_means, ::vl_gmm_set_covariances). **/ #include "gmm.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #ifndef VL_DISABLE_SSE2 #include "mathop_sse2.h" #endif #ifndef VL_DISABLE_AVX #include "mathop_avx.h" #endif /* ---------------------------------------------------------------- */ #ifndef VL_GMM_INSTANTIATING /* ---------------------------------------------------------------- */ #define VL_GMM_MIN_VARIANCE 1e-6 #define VL_GMM_MIN_POSTERIOR 1e-2 #define VL_GMM_MIN_PRIOR 1e-6 struct _VlGMM { vl_type dataType ; /**< Data type. */ vl_size dimension ; /**< Data dimensionality. */ vl_size numClusters ; /**< Number of clusters */ vl_size numData ; /**< Number of data points in the most recent clustering. */ vl_size maxNumIterations ; /**< Maximum number of refinement iterations. */ vl_size numRepetitions ; /**< Number of clustering repetitions. */ int verbosity ; /**< Verbosity level. */ void * means; /**< Means of Gaussian modes. */ void * covariances; /**< Diagonals of covariance matrices of Gaussian modes. */ void * priors; /**< Weights of Gaussian modes. */ void * posteriors; /**< Probabilities of correspondences of points to clusters. */ double * sigmaLowBound ; /**< Lower bound on the diagonal covariance values.
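One entry per dimension; see ::vl_gmm_set_covariance_lower_bounds.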
*/ VlGMMInitialization initialization; /**< Initialization option */ VlKMeans * kmeansInit; /**< KMeans object for initialization of the Gaussians */ double LL ; /**< Current solution loglikelihood */ vl_bool kmeansInitIsOwner; /**< Indicates whether a user provided the KMeans initialization object */ } ; /* ---------------------------------------------------------------- */ /* Life-cycle */ /* ---------------------------------------------------------------- */ static void _vl_gmm_prepare_for_data (VlGMM* self, vl_size numData) { if (self->numData < numData) { vl_free(self->posteriors) ; self->posteriors = vl_malloc(vl_get_type_size(self->dataType) * numData * self->numClusters) ; } self->numData = numData ; } /** @brief Create a new GMM object ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE) ** @param dimension dimension of the data. ** @param numComponents number of Gaussian mixture components. ** @return new GMM object instance. **/ VlGMM * vl_gmm_new (vl_type dataType, vl_size dimension, vl_size numComponents) { vl_index i ; vl_size size = vl_get_type_size(dataType) ; VlGMM * self = vl_calloc(1, sizeof(VlGMM)) ; self->dataType = dataType; self->numClusters = numComponents ; self->numData = 0; self->dimension = dimension ; self->initialization = VlGMMRand; self->verbosity = 0 ; self->maxNumIterations = 50; self->numRepetitions = 1; self->sigmaLowBound = NULL ; self->priors = NULL ; self->covariances = NULL ; self->means = NULL ; self->posteriors = NULL ; self->kmeansInit = NULL ; self->kmeansInitIsOwner = VL_FALSE; self->priors = vl_calloc (numComponents, size) ; self->means = vl_calloc (numComponents * dimension, size) ; self->covariances = vl_calloc (numComponents * dimension, size) ; self->sigmaLowBound = vl_calloc (dimension, sizeof(double)) ; for (i = 0 ; i < (signed)self->dimension ; ++i) { self->sigmaLowBound[i] = 1e-4 ; } return self ; } /** @brief Reset state ** @param self object. ** ** The function resets the state of the GMM object. It deletes ** any stored posterior and other internal state variables. **/ void vl_gmm_reset (VlGMM * self) { if (self->posteriors) { vl_free(self->posteriors) ; self->posteriors = NULL ; self->numData = 0 ; } if (self->kmeansInit && self->kmeansInitIsOwner) { vl_kmeans_delete(self->kmeansInit) ; self->kmeansInit = NULL ; self->kmeansInitIsOwner = VL_FALSE ; } } /** @brief Deletes a GMM object ** @param self GMM object instance. ** ** The function deletes the GMM object instance created ** by ::vl_gmm_new. **/ void vl_gmm_delete (VlGMM * self) { if(self->means) vl_free(self->means); if(self->covariances) vl_free(self->covariances); if(self->priors) vl_free(self->priors); if(self->posteriors) vl_free(self->posteriors); if(self->kmeansInit && self->kmeansInitIsOwner) { vl_kmeans_delete(self->kmeansInit); } vl_free(self); } /* ---------------------------------------------------------------- */ /* Getters and setters */ /* ---------------------------------------------------------------- */ /** @brief Get data type ** @param self object ** @return data type. **/ vl_type vl_gmm_get_data_type (VlGMM const * self) { return self->dataType ; } /** @brief Get the number of clusters ** @param self object ** @return number of clusters. **/ vl_size vl_gmm_get_num_clusters (VlGMM const * self) { return self->numClusters ; } /** @brief Get the number of data points ** @param self object ** @return number of data points.
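** ** The value refers to the data passed to the most recent call to ::vl_gmm_cluster, ::vl_gmm_em, or one of the initialization functions.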
**/ vl_size vl_gmm_get_num_data (VlGMM const * self) { return self->numData ; } /** @brief Get the log likelihood of the current mixture ** @param self object ** @return loglikelihood. **/ double vl_gmm_get_loglikelihood (VlGMM const * self) { return self->LL ; } /** @brief Get verbosity level ** @param self object ** @return verbosity level. **/ int vl_gmm_get_verbosity (VlGMM const * self) { return self->verbosity ; } /** @brief Set verbosity level ** @param self object ** @param verbosity verbosity level. **/ void vl_gmm_set_verbosity (VlGMM * self, int verbosity) { self->verbosity = verbosity ; } /** @brief Get means ** @param self object ** @return cluster means. **/ void const * vl_gmm_get_means (VlGMM const * self) { return self->means ; } /** @brief Get covariances ** @param self object ** @return diagonals of cluster covariance matrices. **/ void const * vl_gmm_get_covariances (VlGMM const * self) { return self->covariances ; } /** @brief Get priors ** @param self object ** @return priors of cluster Gaussians. **/ void const * vl_gmm_get_priors (VlGMM const * self) { return self->priors ; } /** @brief Get posteriors ** @param self object ** @return posterior probabilities of cluster memberships. **/ void const * vl_gmm_get_posteriors (VlGMM const * self) { return self->posteriors ; } /** @brief Get maximum number of iterations ** @param self object ** @return maximum number of iterations. **/ vl_size vl_gmm_get_max_num_iterations (VlGMM const * self) { return self->maxNumIterations ; } /** @brief Set maximum number of iterations ** @param self object. ** @param maxNumIterations maximum number of iterations. **/ void vl_gmm_set_max_num_iterations (VlGMM * self, vl_size maxNumIterations) { self->maxNumIterations = maxNumIterations ; } /** @brief Get maximum number of repetitions. ** @param self object ** @return current number of repetitions for quantization. **/ vl_size vl_gmm_get_num_repetitions (VlGMM const * self) { return self->numRepetitions ; } /** @brief Set maximum number of repetitions ** @param self object ** @param numRepetitions maximum number of repetitions. ** The number of repetitions cannot be smaller than 1. **/ void vl_gmm_set_num_repetitions (VlGMM * self, vl_size numRepetitions) { assert (numRepetitions >= 1) ; self->numRepetitions = numRepetitions ; } /** @brief Get data dimension ** @param self object ** @return data dimension. **/ vl_size vl_gmm_get_dimension (VlGMM const * self) { return self->dimension ; } /** @brief Get initialization algorithm ** @param self object ** @return initialization algorithm. **/ VlGMMInitialization vl_gmm_get_initialization (VlGMM const * self) { return self->initialization ; } /** @brief Set initialization algorithm. ** @param self object ** @param init initialization algorithm. **/ void vl_gmm_set_initialization (VlGMM * self, VlGMMInitialization init) { self->initialization = init; } /** @brief Get KMeans initialization object. ** @param self object ** @return kmeans initialization object. **/ VlKMeans * vl_gmm_get_kmeans_init_object (VlGMM const * self) { return self->kmeansInit; } /** @brief Set KMeans initialization object. ** @param self object ** @param kmeans initialization KMeans object. **/ void vl_gmm_set_kmeans_init_object (VlGMM * self, VlKMeans * kmeans) { if (self->kmeansInit && self->kmeansInitIsOwner) { vl_kmeans_delete(self->kmeansInit) ; } self->kmeansInit = kmeans; self->kmeansInitIsOwner = VL_FALSE; } /** @brief Get the lower bound on the diagonal covariance values.
** @param self object ** @return lower bound on covariances. **/ double const * vl_gmm_get_covariance_lower_bounds (VlGMM const * self) { return self->sigmaLowBound; } /** @brief Set the lower bounds on diagonal covariance values. ** @param self object. ** @param bounds bounds. ** ** There is one lower bound per dimension. Use ::vl_gmm_set_covariance_lower_bound ** to set all of them to a given scalar. **/ void vl_gmm_set_covariance_lower_bounds (VlGMM * self, double const * bounds) { memcpy(self->sigmaLowBound, bounds, sizeof(double) * self->dimension) ; } /** @brief Set all lower bounds on diagonal covariance values to the same scalar. ** @param self object. ** @param bound bound. ** ** While there is one lower bound per dimension, this function sets ** all of them to the specified scalar. Use ::vl_gmm_set_covariance_lower_bounds ** to set them individually. **/ void vl_gmm_set_covariance_lower_bound (VlGMM * self, double bound) { int i ; for (i = 0 ; i < (signed)self->dimension ; ++i) { self->sigmaLowBound[i] = bound ; } } /* ---------------------------------------------------------------- */ /* Instantiate shuffle algorithm */ #define VL_SHUFFLE_type vl_uindex #define VL_SHUFFLE_prefix _vl_gmm #include "shuffle-def.h" /* #ifdef VL_GMM_INSTANTIATING */ #endif /* ---------------------------------------------------------------- */ #ifdef VL_GMM_INSTANTIATING /* ---------------------------------------------------------------- */ /* ---------------------------------------------------------------- */ /* Posterior assignments */ /* ---------------------------------------------------------------- */ /** @fn vl_get_gmm_data_posteriors_f(float*,vl_size,vl_size,float const*,float const*,vl_size,float const*,float const*) ** @brief Get Gaussian modes posterior probabilities ** @param posteriors posterior probabilities (output). ** @param numClusters number of modes in the GMM model. ** @param numData number of data elements. ** @param priors prior mode probabilities of the GMM model. ** @param means means of the GMM model. ** @param dimension data dimension. ** @param covariances diagonal covariances of the GMM model. ** @param data data. ** @return data log-likelihood. ** ** This is a helper function that does not require a ::VlGMM object ** instance to operate.
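** ** Posteriors are evaluated in the log domain and normalized by subtracting the per-point maximum (a log-sum-exp computation), so that very small Gaussian densities do not underflow.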
**/ double VL_XCAT(vl_get_gmm_data_posteriors_, SFX) (TYPE * posteriors, vl_size numClusters, vl_size numData, TYPE const * priors, TYPE const * means, vl_size dimension, TYPE const * covariances, TYPE const * data) { vl_index i_d, i_cl; vl_size dim; double LL = 0; TYPE halfDimLog2Pi = (dimension / 2.0) * log(2.0*VL_PI); TYPE * logCovariances ; TYPE * logWeights ; TYPE * invCovariances ; #if (FLT == VL_TYPE_FLOAT) VlFloatVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_f(VlDistanceMahalanobis) ; #else VlDoubleVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_d(VlDistanceMahalanobis) ; #endif logCovariances = vl_malloc(sizeof(TYPE) * numClusters) ; invCovariances = vl_malloc(sizeof(TYPE) * numClusters * dimension) ; logWeights = vl_malloc(sizeof(TYPE) * numClusters) ; #if defined(_OPENMP) #pragma omp parallel for private(i_cl,dim) num_threads(vl_get_max_threads()) #endif for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) { TYPE logSigma = 0 ; if (priors[i_cl] < VL_GMM_MIN_PRIOR) { logWeights[i_cl] = - (TYPE) VL_INFINITY_D ; } else { logWeights[i_cl] = log(priors[i_cl]); } for(dim = 0 ; dim < dimension ; ++ dim) { logSigma += log(covariances[i_cl*dimension + dim]); invCovariances [i_cl*dimension + dim] = (TYPE) 1.0 / covariances[i_cl*dimension + dim]; } logCovariances[i_cl] = logSigma; } /* end of parallel region */ #if defined(_OPENMP) #pragma omp parallel for private(i_cl,i_d) reduction(+:LL) \ num_threads(vl_get_max_threads()) #endif for (i_d = 0 ; i_d < (signed)numData ; ++ i_d) { TYPE clusterPosteriorsSum = 0; TYPE maxPosterior = (TYPE)(-VL_INFINITY_D) ; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) { TYPE p = logWeights[i_cl] - halfDimLog2Pi - 0.5 * logCovariances[i_cl] - 0.5 * distFn (dimension, data + i_d * dimension, means + i_cl * dimension, invCovariances + i_cl * dimension) ; posteriors[i_cl + i_d * numClusters] = p ; if (p > maxPosterior) { maxPosterior = p ; } } for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { TYPE p = posteriors[i_cl + i_d * numClusters] ; p = exp(p - maxPosterior) ; posteriors[i_cl + i_d * numClusters] = p ; clusterPosteriorsSum += p ; } LL += log(clusterPosteriorsSum) + (double) maxPosterior ; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { posteriors[i_cl + i_d * numClusters] /= clusterPosteriorsSum ; } } /* end of parallel region */ vl_free(logCovariances); vl_free(logWeights); vl_free(invCovariances); return LL; } /* ---------------------------------------------------------------- */ /* Restarts zero-weighted Gaussians */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_maximization_, SFX) (VlGMM * self, TYPE * posteriors, TYPE * priors, TYPE * covariances, TYPE * means, TYPE const * data, vl_size numData) ; static vl_size VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (VlGMM * self, TYPE const * data) { vl_size dimension = self->dimension; vl_size numClusters = self->numClusters; vl_index i_cl, j_cl, i_d, d; vl_size zeroWNum = 0; TYPE * priors = (TYPE*)self->priors ; TYPE * means = (TYPE*)self->means ; TYPE * covariances = (TYPE*)self->covariances ; TYPE * posteriors = (TYPE*)self->posteriors ; //VlRand * rand = vl_get_rand() ; TYPE * mass = vl_calloc(sizeof(TYPE), self->numClusters) ; if (numClusters <= 1) { return 0 ; } /* compute statistics */ { vl_uindex i, k ; vl_size numNullAssignments = 0 ; for (i = 0 ; i < self->numData ; ++i) { for (k = 0 ; k < self->numClusters ; ++k) { TYPE p = ((TYPE*)self->posteriors)[k + i * self->numClusters] ; 
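/* accumulate the total soft mass of each mode; assignments below VL_GMM_MIN_POSTERIOR are counted so that the sparsity of the posteriors can be reported */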
mass[k] += p ; if (p < VL_GMM_MIN_POSTERIOR) { numNullAssignments ++ ; } } } if (self->verbosity) { VL_PRINTF("gmm: sparsity of data posterior: %.1f%%\n", (double)numNullAssignments / (self->numData * self->numClusters) * 100) ; } } #if 0 /* search for clusters with negligible weight and reassign their points to fat clusters */ for (i_cl = 0 ; i_cl < numClusters ; ++i_cl) { if (priors[i_cl] < 0.00001/numClusters) { double mass = priors[0] ; vl_index best = 0 ; for (j_cl = 1 ; j_cl < numClusters ; ++j_cl) { if (priors[j_cl] > mass) { mass = priors[j_cl] ; best = j_cl ; } } if (best == i_cl) { /* this should never happen */ continue ; } j_cl = best ; zeroWNum ++ ; VL_PRINTF("gmm: restarting mode %d by splitting mode %d (with prior %f)\n", i_cl,j_cl,mass) ; priors[i_cl] = mass/2 ; priors[j_cl] = mass/2 ; for (d = 0 ; d < dimension ; ++d) { TYPE sigma2 = covariances[j_cl*dimension + d] ; TYPE sigma = VL_XCAT(vl_sqrt_,SFX)(sigma2) ; means[i_cl*dimension + d] = means[j_cl*dimension + d] + 0.001 * (vl_rand_real1(rand) - 0.5) * sigma ; covariances[i_cl*dimension + d] = sigma2 ; } } } #endif /* search for clusters with negligible weight and reassign their points to fat clusters */ for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { double size = - VL_INFINITY_D ; vl_index best = -1 ; if (mass[i_cl] >= VL_GMM_MIN_POSTERIOR * VL_MAX(1.0, (double) self->numData / self->numClusters)) { continue ; } if (self->verbosity) { VL_PRINTF("gmm: mode %d is nearly empty (mass %f)\n", i_cl, mass[i_cl]) ; } /* Search for the cluster that (approximately) maximally contributes to making the log-likelihood small. */ for (j_cl = 0 ; j_cl < (signed)numClusters ; ++j_cl) { double size_ ; if (priors[j_cl] < VL_GMM_MIN_PRIOR) { continue ; } size_ = - 0.5 * (1.0 + log(2*VL_PI)) ; for(d = 0 ; d < (signed)dimension ; d++) { double sigma2 = covariances[j_cl * dimension + d] ; size_ -= 0.5 * log(sigma2) ; } size_ *= priors[j_cl] ; if (self->verbosity > 2) { VL_PRINTF("gmm: mode %d: prior %f, mass %f, score %f\n", j_cl, priors[j_cl], mass[j_cl], size_) ; } if (size_ > size) { size = size_ ; best = j_cl ; } } j_cl = best ; if (j_cl == i_cl || j_cl < 0) { if (self->verbosity) { VL_PRINTF("gmm: mode %d is empty, " "but no other mode to split could be found\n", i_cl) ; } continue ; } if (self->verbosity) { VL_PRINTF("gmm: reinitializing empty mode %d with mode %d (prior %f, mass %f, score %f)\n", i_cl, j_cl, priors[j_cl], mass[j_cl], size) ; } /* Search for the dimension with maximum variance. */ size = - VL_INFINITY_D ; best = - 1 ; for(d = 0; d < (signed)dimension; d++) { double sigma2 = covariances[j_cl * dimension + d] ; if (sigma2 > size) { size = sigma2 ; best = d ; } } /* Reassign points from j_cl (mode to split) to i_cl (empty mode). */ { TYPE mu = means[best + j_cl * self->dimension] ; for(i_d = 0 ; i_d < (signed)self->numData ; ++ i_d) { TYPE p = posteriors[j_cl + self->numClusters * i_d] ; TYPE q = posteriors[i_cl + self->numClusters * i_d] ; /* ~= 0 */ if (data[best + i_d * self->dimension] < mu) { /* assign this point to i_cl */ posteriors[i_cl + self->numClusters * i_d] += p ; posteriors[j_cl + self->numClusters * i_d] = 0 ; } else { /* assign this point to j_cl */ posteriors[i_cl + self->numClusters * i_d] = 0 ; posteriors[j_cl + self->numClusters * i_d] += q ; } } } /* Re-estimate.
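After reassigning the posteriors of the split and restarted modes, run a full maximization step so that the priors, means, and covariances reflect the new soft assignments.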
*/ VL_XCAT(_vl_gmm_maximization_, SFX) (self,posteriors,priors,covariances,means,data,self->numData) ; } return zeroWNum; } /* ---------------------------------------------------------------- */ /* Helpers */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_apply_bounds_, SFX)(VlGMM * self) { vl_uindex dim ; vl_uindex k ; vl_size numAdjusted = 0 ; TYPE * cov = (TYPE*)self->covariances ; double const * lbs = self->sigmaLowBound ; for (k = 0 ; k < self->numClusters ; ++k) { vl_bool adjusted = VL_FALSE ; for (dim = 0 ; dim < self->dimension ; ++dim) { if (cov[k * self->dimension + dim] < lbs[dim] ) { cov[k * self->dimension + dim] = lbs[dim] ; adjusted = VL_TRUE ; } } if (adjusted) { numAdjusted ++ ; } } if (numAdjusted > 0 && self->verbosity > 0) { VL_PRINT("gmm: detected %d of %d modes with at least one dimension " "with covariance too small (set to lower bound)\n", numAdjusted, self->numClusters) ; } } /* ---------------------------------------------------------------- */ /* EM - Maximization step */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_maximization_, SFX) (VlGMM * self, TYPE * posteriors, TYPE * priors, TYPE * covariances, TYPE * means, TYPE const * data, vl_size numData) { vl_size numClusters = self->numClusters; vl_index i_d, i_cl; vl_size dim ; TYPE * oldMeans ; double time = 0 ; if (self->verbosity > 1) { VL_PRINTF("gmm: em: entering maximization step\n") ; time = vl_get_cpu_time() ; } oldMeans = vl_malloc(sizeof(TYPE) * self->dimension * numClusters) ; memcpy(oldMeans, means, sizeof(TYPE) * self->dimension * numClusters) ; memset(priors, 0, sizeof(TYPE) * numClusters) ; memset(means, 0, sizeof(TYPE) * self->dimension * numClusters) ; memset(covariances, 0, sizeof(TYPE) * self->dimension * numClusters) ; #if defined(_OPENMP) #pragma omp parallel default(shared) private(i_d, i_cl, dim) \ num_threads(vl_get_max_threads()) #endif { TYPE * clusterPosteriorSum_, * means_, * covariances_ ; #if defined(_OPENMP) #pragma omp critical #endif { clusterPosteriorSum_ = vl_calloc(sizeof(TYPE), numClusters) ; means_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ; covariances_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ; } /* Accumulate weighted sums and sum of square differences. Once normalized, these become the means and covariances of each Gaussian mode. The squared differences will be taken w.r.t. the old means however. In this manner, one avoids doing two passes across the data. Eventually, these are corrected to account for the new means properly. In principle, one could set the old means to zero, but this may cause numerical instabilities (by accumulating large squares). 
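Concretely, since E[(x - muOld)^2] = E[(x - muNew)^2] + (muNew - muOld)^2 under the posterior-weighted distribution, subtracting the squared mean shift after normalization recovers the variance around the new means.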
*/ #if defined(_OPENMP) #pragma omp for #endif for (i_d = 0 ; i_d < (signed)numData ; ++i_d) { for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { TYPE p = posteriors[i_cl + i_d * self->numClusters] ; vl_bool calculated = VL_FALSE ; /* skip very small associations for speed */ if (p < VL_GMM_MIN_POSTERIOR / numClusters) { continue ; } clusterPosteriorSum_ [i_cl] += p ; #ifndef VL_DISABLE_AVX if (vl_get_simd_enabled() && vl_cpu_has_avx()) { VL_XCAT(_vl_weighted_mean_avx_, SFX) (self->dimension, means_+ i_cl * self->dimension, data + i_d * self->dimension, p) ; VL_XCAT(_vl_weighted_sigma_avx_, SFX) (self->dimension, covariances_ + i_cl * self->dimension, data + i_d * self->dimension, oldMeans + i_cl * self->dimension, p) ; calculated = VL_TRUE; } #endif #ifndef VL_DISABLE_SSE2 if (vl_get_simd_enabled() && vl_cpu_has_sse2() && !calculated) { VL_XCAT(_vl_weighted_mean_sse2_, SFX) (self->dimension, means_+ i_cl * self->dimension, data + i_d * self->dimension, p) ; VL_XCAT(_vl_weighted_sigma_sse2_, SFX) (self->dimension, covariances_ + i_cl * self->dimension, data + i_d * self->dimension, oldMeans + i_cl * self->dimension, p) ; calculated = VL_TRUE; } #endif if(!calculated) { for (dim = 0 ; dim < self->dimension ; ++dim) { TYPE x = data[i_d * self->dimension + dim] ; TYPE mu = oldMeans[i_cl * self->dimension + dim] ; TYPE diff = x - mu ; means_ [i_cl * self->dimension + dim] += p * x ; covariances_ [i_cl * self->dimension + dim] += p * (diff*diff) ; } } } } /* accumulate */ #if defined(_OPENMP) #pragma omp critical #endif { for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { priors [i_cl] += clusterPosteriorSum_ [i_cl]; for (dim = 0 ; dim < self->dimension ; ++dim) { means [i_cl * self->dimension + dim] += means_ [i_cl * self->dimension + dim] ; covariances [i_cl * self->dimension + dim] += covariances_ [i_cl * self->dimension + dim] ; } } vl_free(means_); vl_free(covariances_); vl_free(clusterPosteriorSum_); } } /* parallel section */ /* at this stage priors[] contains the total mass of each cluster */ for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) { TYPE mass = priors[i_cl] ; /* do not update modes that do not receive mass */ if (mass >= 1e-6 / numClusters) { for (dim = 0 ; dim < self->dimension ; ++dim) { means[i_cl * self->dimension + dim] /= mass ; covariances[i_cl * self->dimension + dim] /= mass ; } } } /* apply old to new means correction */ for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) { TYPE mass = priors[i_cl] ; if (mass >= 1e-6 / numClusters) { for (dim = 0 ; dim < self->dimension ; ++dim) { TYPE mu = means[i_cl * self->dimension + dim] ; TYPE oldMu = oldMeans[i_cl * self->dimension + dim] ; TYPE diff = mu - oldMu ; covariances[i_cl * self->dimension + dim] -= diff * diff ; } } } VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ; { TYPE sum = 0; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { sum += priors[i_cl] ; } sum = VL_MAX(sum, 1e-12) ; for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) { priors[i_cl] /= sum ; } } if (self->verbosity > 1) { VL_PRINTF("gmm: em: maximization step completed in %.2f s\n", vl_get_cpu_time() - time) ; } vl_free(oldMeans); } /* ---------------------------------------------------------------- */ /* EM iterations */ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_gmm_em_, SFX) (VlGMM * self, TYPE const * data, vl_size numData) { vl_size iteration, restarted ; double previousLL = (TYPE)(-VL_INFINITY_D) ; double LL = (TYPE)(-VL_INFINITY_D) ; double time = 0 ; _vl_gmm_prepare_for_data
(self, numData) ; VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ; for (iteration = 0 ; 1 ; ++ iteration) { double eps ; /* Expectation: assign data to Gaussian modes and compute log-likelihood. */ if (self->verbosity > 1) { VL_PRINTF("gmm: em: entering expectation step\n") ; time = vl_get_cpu_time() ; } LL = VL_XCAT(vl_get_gmm_data_posteriors_,SFX) (self->posteriors, self->numClusters, numData, self->priors, self->means, self->dimension, self->covariances, data) ; if (self->verbosity > 1) { VL_PRINTF("gmm: em: expectation step completed in %.2f s\n", vl_get_cpu_time() - time) ; } /* Check the termination conditions. */ if (self->verbosity) { VL_PRINTF("gmm: em: iteration %d: loglikelihood = %f (variation = %f)\n", iteration, LL, LL - previousLL) ; } if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("gmm: em: terminating because " "the maximum number of iterations " "(%d) has been reached.\n", self->maxNumIterations) ; } break ; } eps = vl_abs_d ((LL - previousLL) / (LL)); if ((iteration > 0) && (eps < 0.00001)) { if (self->verbosity) { VL_PRINTF("gmm: em: terminating because the algorithm " "fully converged (log-likelihood variation = %f).\n", eps) ; } break ; } previousLL = LL ; /* Restart empty modes. */ if (iteration > 1) { restarted = VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (self, data); if ((restarted > 0) && (self->verbosity > 0)) { VL_PRINTF("gmm: em: %d Gaussian modes restarted because " "they had become empty.\n", restarted); } } /* Maximization: reestimate the GMM parameters. */ VL_XCAT(_vl_gmm_maximization_, SFX) (self,self->posteriors,self->priors,self->covariances,self->means,data,numData) ; } return LL; } /* ---------------------------------------------------------------- */ /* Kmeans initialization of mixtures */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_init_with_kmeans_, SFX) (VlGMM * self, TYPE const * data, vl_size numData, VlKMeans * kmeansInit) { vl_size i_d ; vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData); _vl_gmm_prepare_for_data (self, numData) ; memset(self->means,0,sizeof(TYPE) * self->numClusters * self->dimension) ; memset(self->priors,0,sizeof(TYPE) * self->numClusters) ; memset(self->covariances,0,sizeof(TYPE) * self->numClusters * self->dimension) ; memset(self->posteriors,0,sizeof(TYPE) * self->numClusters * numData) ; /* set up the specified KMeans initialization object, if any */ if (kmeansInit) { vl_gmm_set_kmeans_init_object (self, kmeansInit) ; } /* if a KMeans initialization object is still unavailable, create one */ if(self->kmeansInit == NULL) { vl_size ncomparisons = VL_MAX(numData / 4, 10) ; vl_size niter = 5 ; vl_size ntrees = 1 ; vl_size nrepetitions = 1 ; VlKMeansAlgorithm algorithm = VlKMeansANN ; VlKMeansInitialization initialization = VlKMeansRandomSelection ; VlKMeans * kmeansInitDefault = vl_kmeans_new(self->dataType,VlDistanceL2) ; vl_kmeans_set_initialization(kmeansInitDefault, initialization); vl_kmeans_set_max_num_iterations (kmeansInitDefault, niter) ; vl_kmeans_set_max_num_comparisons (kmeansInitDefault, ncomparisons) ; vl_kmeans_set_num_trees (kmeansInitDefault, ntrees); vl_kmeans_set_algorithm (kmeansInitDefault, algorithm); vl_kmeans_set_num_repetitions(kmeansInitDefault, nrepetitions); vl_kmeans_set_verbosity (kmeansInitDefault, self->verbosity); self->kmeansInit = kmeansInitDefault; self->kmeansInitIsOwner = VL_TRUE ; } /* Use k-means to assign data to clusters */ vl_kmeans_cluster (self->kmeansInit, data, self->dimension, numData,
self->numClusters); vl_kmeans_quantize (self->kmeansInit, assignments, NULL, data, numData) ; /* Transform the k-means assignments into posteriors and estimate the mode parameters */ for(i_d = 0; i_d < numData; i_d++) { ((TYPE*)self->posteriors)[assignments[i_d] + i_d * self->numClusters] = (TYPE) 1.0 ; } /* Update cluster parameters */ VL_XCAT(_vl_gmm_maximization_, SFX) (self,self->posteriors,self->priors,self->covariances,self->means,data,numData); vl_free(assignments) ; } /* ---------------------------------------------------------------- */ /* Random initialization of mixtures */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (VlGMM * self, TYPE const * data, TYPE * initSigma, vl_size dimension, vl_size numData) { vl_size dim; vl_uindex i; TYPE * dataMean ; memset(initSigma,0,sizeof(TYPE)*dimension) ; if (numData <= 1) return ; dataMean = vl_malloc(sizeof(TYPE)*dimension); memset(dataMean,0,sizeof(TYPE)*dimension) ; /* find the mean of the whole dataset */ for(dim = 0 ; dim < dimension ; dim++) { for(i = 0 ; i < numData ; i++) { dataMean[dim] += data[i*dimension + dim]; } dataMean[dim] /= numData; } /* compute the variance of the whole dataset */ for(dim = 0; dim < dimension; dim++) { for(i = 0; i < numData; i++) { TYPE diff = (data[i*dimension + dim] - dataMean[dim]) ; initSigma[dim] += diff*diff ; } initSigma[dim] /= numData - 1 ; } vl_free(dataMean) ; } static void VL_XCAT(_vl_gmm_init_with_rand_data_, SFX) (VlGMM * self, TYPE const * data, vl_size numData) { vl_uindex i, k, dim ; VlKMeans * kmeans ; _vl_gmm_prepare_for_data(self, numData) ; /* initialize the priors of the Gaussians so that they are equal and sum to one */ for (i = 0 ; i < self->numClusters ; ++i) { ((TYPE*)self->priors)[i] = (TYPE) (1.0 / self->numClusters) ; } /* initialize the diagonals of the covariance matrices to the data covariance */ VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (self, data, self->covariances, self->dimension, numData); for (k = 1 ; k < self->numClusters ; ++ k) { for(dim = 0; dim < self->dimension; dim++) { *((TYPE*)self->covariances + k * self->dimension + dim) = *((TYPE*)self->covariances + dim) ; } } /* use kmeans++ initialization to pick points at random */ kmeans = vl_kmeans_new(self->dataType,VlDistanceL2) ; vl_kmeans_init_centers_plus_plus(kmeans, data, self->dimension, numData, self->numClusters) ; memcpy(self->means, vl_kmeans_get_centers(kmeans), sizeof(TYPE) * self->dimension * self->numClusters) ; vl_kmeans_delete(kmeans) ; } /* ---------------------------------------------------------------- */ #else /* VL_GMM_INSTANTIATING */ /* ---------------------------------------------------------------- */ #ifndef __DOXYGEN__ #define FLT VL_TYPE_FLOAT #define TYPE float #define SFX f #define VL_GMM_INSTANTIATING #include "gmm.c" #define FLT VL_TYPE_DOUBLE #define TYPE double #define SFX d #define VL_GMM_INSTANTIATING #include "gmm.c" #endif /* VL_GMM_INSTANTIATING */ #endif /* ---------------------------------------------------------------- */ #ifndef VL_GMM_INSTANTIATING /* ---------------------------------------------------------------- */ /** @brief Create a new GMM object by copy ** @param self object. ** @return new copy. ** ** Most parameters, including the cluster priors, means, and ** covariances are copied. Data posteriors (available after ** initialization or EM) are not; nor is the KMeans object used for ** initialization, if any.
**/ VlGMM * vl_gmm_new_copy (VlGMM const * self) { vl_size size = vl_get_type_size(self->dataType) ; VlGMM * gmm = vl_gmm_new(self->dataType, self->dimension, self->numClusters); gmm->initialization = self->initialization; gmm->maxNumIterations = self->maxNumIterations; gmm->numRepetitions = self->numRepetitions; gmm->verbosity = self->verbosity; gmm->LL = self->LL; memcpy(gmm->means, self->means, size*self->numClusters*self->dimension); memcpy(gmm->covariances, self->covariances, size*self->numClusters*self->dimension); memcpy(gmm->priors, self->priors, size*self->numClusters); return gmm ; } /** @brief Initialize mixture before EM takes place using random initialization ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. **/ void vl_gmm_init_with_rand_data (VlGMM * self, void const * data, vl_size numData) { vl_gmm_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_gmm_init_with_rand_data_f (self, (float const *)data, numData) ; break ; case VL_TYPE_DOUBLE : _vl_gmm_init_with_rand_data_d (self, (double const *)data, numData) ; break ; default: abort() ; } } /** @brief Initializes the GMM using KMeans ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. ** @param kmeansInit KMeans object to use. **/ void vl_gmm_init_with_kmeans (VlGMM * self, void const * data, vl_size numData, VlKMeans * kmeansInit) { vl_gmm_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_gmm_init_with_kmeans_f (self, (float const *)data, numData, kmeansInit) ; break ; case VL_TYPE_DOUBLE : _vl_gmm_init_with_kmeans_d (self, (double const *)data, numData, kmeansInit) ; break ; default: abort() ; } } #if 0 #include<fenv.h> #endif /** @brief Run GMM clustering - includes initialization and EM ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. 
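** @return loglikelihood of the best model found over ::vl_gmm_get_num_repetitions repetitions.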
**/ double vl_gmm_cluster (VlGMM * self, void const * data, vl_size numData) { void * bestPriors = NULL ; void * bestMeans = NULL; void * bestCovariances = NULL; void * bestPosteriors = NULL; vl_size size = vl_get_type_size(self->dataType) ; double bestLL = -VL_INFINITY_D; vl_uindex repetition; assert(self->numRepetitions >=1) ; bestPriors = vl_malloc(size * self->numClusters) ; bestMeans = vl_malloc(size * self->dimension * self->numClusters) ; bestCovariances = vl_malloc(size * self->dimension * self->numClusters) ; bestPosteriors = vl_malloc(size * self->numClusters * numData) ; #if 0 feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW); #endif for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) { double LL ; double timeRef ; if (self->verbosity) { VL_PRINTF("gmm: clustering: starting repetition %d of %d\n", repetition + 1, self->numRepetitions) ; } /* initialize a new mixture model */ timeRef = vl_get_cpu_time() ; switch (self->initialization) { case VlGMMKMeans : vl_gmm_init_with_kmeans (self, data, numData, NULL) ; break ; case VlGMMRand : vl_gmm_init_with_rand_data (self, data, numData) ; break ; case VlGMMCustom : break ; default: abort() ; } if (self->verbosity) { VL_PRINTF("gmm: model initialized in %.2f s\n", vl_get_cpu_time() - timeRef) ; } /* fit the model to data by running EM */ timeRef = vl_get_cpu_time () ; LL = vl_gmm_em (self, data, numData) ; if (self->verbosity) { VL_PRINTF("gmm: optimization terminated in %.2f s with loglikelihood %f\n", vl_get_cpu_time() - timeRef, LL) ; } if (LL > bestLL || repetition == 0) { void * temp ; temp = bestPriors ; bestPriors = self->priors ; self->priors = temp ; temp = bestMeans ; bestMeans = self->means ; self->means = temp ; temp = bestCovariances ; bestCovariances = self->covariances ; self->covariances = temp ; temp = bestPosteriors ; bestPosteriors = self->posteriors ; self->posteriors = temp ; bestLL = LL; } } vl_free (self->priors) ; vl_free (self->means) ; vl_free (self->covariances) ; vl_free (self->posteriors) ; self->priors = bestPriors ; self->means = bestMeans ; self->covariances = bestCovariances ; self->posteriors = bestPosteriors ; self->LL = bestLL; if (self->verbosity) { VL_PRINTF("gmm: all repetitions terminated with final loglikelihood %f\n", self->LL) ; } return bestLL ; } /** @brief Invoke the EM algorithm. ** @param self GMM object instance. ** @param data data points which should be clustered. ** @param numData number of data points. **/ double vl_gmm_em (VlGMM * self, void const * data, vl_size numData) { switch (self->dataType) { case VL_TYPE_FLOAT: return _vl_gmm_em_f (self, (float const *)data, numData) ; break ; case VL_TYPE_DOUBLE: return _vl_gmm_em_d (self, (double const *)data, numData) ; break ; default: abort() ; } return 0 ; } /** @brief Explicitly set the initial means for EM. ** @param self GMM object instance. ** @param means initial values of means. **/ void vl_gmm_set_means (VlGMM * self, void const * means) { memcpy(self->means,means, self->dimension * self->numClusters * vl_get_type_size(self->dataType)); } /** @brief Explicitly set the initial sigma diagonals for EM. ** @param self GMM object instance. ** @param covariances initial values of covariance matrix diagonals. **/ void vl_gmm_set_covariances (VlGMM * self, void const * covariances) { memcpy(self->covariances,covariances, self->dimension * self->numClusters * vl_get_type_size(self->dataType)); } /** @brief Explicitly set the initial priors of the gaussians. ** @param self GMM object instance. 
** @param priors initial values of the gaussian priors. **/ void vl_gmm_set_priors (VlGMM * self, void const * priors) { memcpy(self->priors,priors, self->numClusters * vl_get_type_size(self->dataType)); } /* VL_GMM_INSTANTIATING */ #endif #undef SFX #undef TYPE #undef FLT #undef VL_GMM_INSTANTIATING
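/* ----------------------------------------------------------------
 * Usage sketch (illustrative only, not compiled into the library):
 * learn a float GMM with KMeans-based initialization, as described in
 * the @ref gmm-starting documentation above. Here `dimension`,
 * `numClusters`, `numData`, and `data` are assumed to be supplied by
 * the caller, and VlKMeansLloyd is assumed available from @ref kmeans.
 *
 * @code
 * VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
 * vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
 * vl_kmeans_set_max_num_iterations (kmeans, 20) ;
 *
 * VlGMM * gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;
 * vl_gmm_set_initialization (gmm, VlGMMKMeans) ;
 * vl_gmm_set_kmeans_init_object (gmm, kmeans) ; // caller keeps ownership
 * vl_gmm_set_num_repetitions (gmm, 2) ;         // keep the best of two runs
 * vl_gmm_set_covariance_lower_bound (gmm, 1e-6) ;
 *
 * double LL = vl_gmm_cluster (gmm, data, numData) ;
 *
 * vl_gmm_delete (gmm) ;       // does not delete kmeans (kmeansInitIsOwner is VL_FALSE)
 * vl_kmeans_delete (kmeans) ;
 * @endcode
 * ---------------------------------------------------------------- */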
common.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template <typename T> inline void SplitToIntLike(const char *c_str, char delimiter, std::vector<T> &ret) { CHECK(ret.empty()); std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back({}); Atoi(str.substr(i, pos - i).c_str(), &ret.back()); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back({}); Atoi(str.substr(i).c_str(), &ret.back()); } } template<typename T> inline static double Pow(T 
base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', 
'7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); 
++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end <= start) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values.
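 * \note For numerical stability, the maximum element is subtracted before exponentiation: softmax(x)_i = exp(x_i - max_j x_j) / sum_k exp(x_k - max_j x_j).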
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (size_t i = 0; i < input.size(); ++i) { ret.push_back(input.at(i).get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; for (size_t i = start; i < keys.size(); ++i) { arr.emplace_back(keys[i], values[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { keys[i] = arr[i].first; values[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) { std::vector<T*> ptr(data.size()); for (size_t i = 0; i < data.size(); ++i) { ptr[i] = data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)){ return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
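// Bottom-up merge: each pass merges adjacent sorted runs of length s, copying the left run into the buffer so std::merge can write the result back in place.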
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>& vec, const T val) { int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec.size()) < i1 + 1) { vec.resize(i1 + 1, 0); } vec[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { 
++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckASCII(const std::string& s) { for (auto c : s) { if (static_cast<unsigned char>(c) > 127) { return false; } } return true; } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
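The ParallelSort above splits the range into per-thread chunks, sorts each chunk independently, then performs a log-depth pairwise merge through a temporary buffer. A minimal usage sketch, assuming the header above is available as "common.h" (hypothetical path) and the file is compiled with OpenMP enabled:

#include <vector>
#include <random>
#include <algorithm>
#include <functional>
#include "common.h"  // hypothetical include path for the utilities above

int main() {
  std::vector<double> v(1 << 20);
  std::mt19937 gen(42);
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  for (auto& x : v) x = dist(gen);
  // Chunked parallel sort + pairwise merge; falls back to std::sort
  // for inputs below kMinInnerLen or when only one thread is available.
  LightGBM::Common::ParallelSort(v.begin(), v.end(), std::less<double>());
  // The result must be indistinguishable from a sequential sort.
  return std::is_sorted(v.begin(), v.end()) ? 0 : 1;
}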
test.c
#include <stdio.h> #include <omp.h> #include <math.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; INIT(); int cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int gpu_threads = 128; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; // // Test: omp_get_thread_num() // ZERO(A); TESTD("omp target parallel num_threads(max_threads)", { int tid = omp_get_thread_num(); A[tid] += tid; }, VERIFY(0, max_threads, A[i], i*(trial+1))); // // Test: Execute parallel on device // TESTD("omp target parallel num_threads(max_threads)", { int i = omp_get_thread_num()*4; for (int j = i; j < i + 4; j++) { B[j] = D[j] + E[j]; } }, VERIFY(0, max_threads*4, B[i], (double)0)); // // Test: if clause serial execution of parallel region on host // ZERO(A); TESTD("omp target parallel num_threads(max_threads) if(0)", { int tid = omp_get_thread_num(); A[tid] = tid; }, VERIFY(0, max_threads, A[i], 0)); // // Test: if clause parallel execution of parallel region on device // ZERO(A); TESTD("omp target parallel num_threads(max_threads) if(A[0] == 0)", { int tid = omp_get_thread_num(); A[tid] = tid + omp_is_initial_device(); }, VERIFY(0, max_threads, A[i], i + cpuExec)); // // Test: if clause serial execution of parallel region on device // ZERO(A); TESTD("omp target parallel num_threads(max_threads) if(parallel: 0)", { int tid = omp_get_thread_num(); A[tid] = !omp_is_initial_device(); }, VERIFY(0, max_threads, A[i], i == 0 ? 1 - cpuExec : 0)); // // Test: if clause parallel execution of parallel region on host // ZERO(A); TESTD("omp target parallel num_threads(max_threads) if(target: 0) if(parallel: A[0] == 0)", { int tid = omp_get_thread_num(); A[tid] = tid + omp_is_initial_device(); }, VERIFY(0, /* bound to */ cpu_threads, A[i], i+1)); // // Test: if clause serial execution of parallel region on device without num_threads clause // ZERO(A); TESTD("omp target parallel if(parallel: A[0] > 0)", { int tid = omp_get_thread_num(); A[tid] = omp_get_num_threads(); }, VERIFY(0, 1, A[0], 1)); // // Test: if clause parallel execution of parallel region on device without num_threads clause // The testcase should be launched with the default number of threads. // ZERO(A); #pragma omp target parallel if(parallel: A[0] == 0) { // Get default number of threads launched by this runtime. 
B[0] = omp_get_num_threads(); } TESTD("omp target parallel if(parallel: A[0] == 0)", { int tid = omp_get_thread_num(); A[tid] = omp_get_num_threads(); }, VERIFY(0, 1, A[0], B[0])); // // Test: if clause parallel execution of parallel region on device with num_threads clause // ZERO(A); TESTD("omp target parallel num_threads(1) if(parallel: A[0] == 0)", { int tid = omp_get_thread_num(); A[tid] = omp_get_num_threads(); }, VERIFY(0, 1, A[0], 1)); // // Test: proc_bind clause // TESTD("omp target parallel num_threads(max_threads) proc_bind(master)", { int i = omp_get_thread_num()*4; for (int j = i; j < i + 4; j++) { B[j] = 1 + D[j] + E[j]; } }, VERIFY(0, max_threads*4, B[i], 1)); TESTD("omp target parallel num_threads(max_threads) proc_bind(close)", { int i = omp_get_thread_num()*4; for (int j = i; j < i + 4; j++) { B[j] = 1 + D[j] + E[j]; } }, VERIFY(0, max_threads*4, B[i], 1)); TESTD("omp target parallel num_threads(max_threads) proc_bind(spread)", { int i = omp_get_thread_num()*4; for (int j = i; j < i + 4; j++) { B[j] = 1 + D[j] + E[j]; } }, VERIFY(0, max_threads*4, B[i], 1)); // // Test: num_threads on parallel. // for (int t = 1; t <= max_threads; t += (t < 32) ? 31 : 32) { ZERO(A); int threads[1]; threads[0] = t; TESTD("omp target parallel num_threads(threads[0])", { int tid = omp_get_thread_num(); A[tid] = 99; }, VERIFY(0, 128, A[i], 99*(i < t))); } DUMP_SUCCESS(gpu_threads-max_threads); // // Test: sharing of variables from host to parallel region. // ZERO(A); { double tmp = 1; A[0] = tmp; TESTD("omp target parallel map(tofrom: tmp) num_threads(1)", { tmp = 2; A[0] += tmp; }, VERIFY(0, 1, A[i]+tmp, (1+trial)*2+1+2)); } // // Test: private clause on target parallel region. // ZERO(A); { double p[1], q = 99; p[0] = 1; A[0] = p[0]; TESTD("omp target parallel private(p, q) num_threads(1)", { p[0] = 2; q = 0; A[0] += p[0]; }, VERIFY(0, 1, A[i]+p[0]+q, (1+trial)*2+2+99)); } // // Test: firstprivate clause on parallel region. // ZERO(A); { double p[1], q = 99; p[0] = 5; A[0] = p[0]; TESTD("omp target parallel firstprivate(p, q) num_threads(1)", { A[0] += p[0] + q; p[0] = 2; q = 0; }, VERIFY(0, 1, A[i]+p[0]+q, (1+trial)*(99+5)+5+5+99)); } #if 0 INCORRECT CODEGEN // // Test: shared clause on parallel region. // ZERO(A); { double p[1], q; p[0] = 5; A[0] = p[0]; q = -7; TESTD("omp target parallel num_threads(2) shared(p, q)", { if (omp_get_thread_num() == 1) { p[0] = 99; q = 2; } _Pragma("omp barrier") if (omp_get_thread_num() == 0) A[0] += p[0] + q; _Pragma("omp barrier") p[0] = 1; q = -100; }, VERIFY(0, 1, A[i]+p[0]+q, (1+trial)*(99+2)+5+-7)); } #endif return 0; }
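The tests above exercise directive-name modifiers on the if clause of a combined construct: if(target: c) decides whether the region offloads at all, while if(parallel: c) independently decides whether the (possibly offloaded) region runs with more than one thread. A minimal standalone sketch of those semantics (output depends on the runtime's default thread count):

#include <stdio.h>
#include <omp.h>

int main(void) {
  int on_host = -1, nthreads = -1;
  // target clause is false: forced host fallback;
  // parallel clause is true: multiple threads are permitted.
  #pragma omp target parallel if(target: 0) if(parallel: 1) \
          map(tofrom: on_host, nthreads)
  {
    #pragma omp single
    {
      on_host  = omp_is_initial_device(); // expected 1: region ran on the host
      nthreads = omp_get_num_threads();   // may be > 1
    }
  }
  printf("host=%d threads=%d\n", on_host, nthreads);
  return 0;
}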
NETLMv2_fmt_plug.c
/* * NETLMv2_fmt.c -- LMv2 Challenge/Response * * Written by JoMo-Kun <jmk at foofus.net> in 2008 * and placed in the public domain. * * Performance fixes, OMP and utf-8 support by magnum 2010-2011 * * This algorithm is designed for performing brute-force cracking of the LMv2 * challenge/response sets exchanged during network-based authentication * attempts [1]. The captured challenge/response set from these attempts * should be stored using the following format: * * USERNAME::DOMAIN:SERVER CHALLENGE:LMv2 RESPONSE:CLIENT CHALLENGE * * For example: * Administrator::WORKGROUP:1122334455667788:6759A5A7EFB25452911DE7DE8296A0D8:F503236B200A5B3A * * It should be noted that a LMv2 authentication response is not same as a LM * password hash, which can be extracted using tools such as FgDump [2]. In * fact, a NTLM hash and not a LM hash is used within the LMv2 algorithm. LMv2 * challenge/response authentication typically takes place when the GPO * "Network Security: LAN Manager authentication level" is configured to a setting * that enforces the use of NTLMv2, such as "Send NTLMv2 response only\refuse * LM & NTLM." * * LMv2 responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). * * [1] http://davenport.sourceforge.net/ntlm.html#theLmv2Response * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NETLMv2; #elif FMT_REGISTERS_H john_register_one(&fmt_NETLMv2); #else #include <stdint.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "md5.h" #include "hmacmd5.h" #include "byteorder.h" #include "memdbg.h" #ifndef uchar #define uchar unsigned char #endif #define FORMAT_LABEL "netlmv2" #define FORMAT_NAME "LMv2 C/R" #define FORMAT_TAG "$NETLMv2$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD4 HMAC-MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 /* lmcons.h - PWLEN (256) ? 127 ? 
*/ #define USERNAME_LENGTH 60 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */ #define DOMAIN_LENGTH 45 /* lmcons.h - CNLEN / DNLEN */ #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define CHALLENGE_LENGTH 32 #define SALT_SIZE 16 + 1 + 2 * (USERNAME_LENGTH + DOMAIN_LENGTH) + 1 #define SALT_ALIGN 4 #define CIPHERTEXT_LENGTH 32 #define TOTAL_LENGTH 12 + USERNAME_LENGTH + DOMAIN_LENGTH + CHALLENGE_LENGTH + CIPHERTEXT_LENGTH // these may be altered in init() if running OMP #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #ifndef OMP_SCALE #define OMP_SCALE 1536 #endif static struct fmt_tests tests[] = { {"", "1337adminPASS", {"FOODOM\\Administrator", "", "", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} }, {"$NETLMv2$ADMINISTRATORFOODOM$1122334455667788$6F64C5C1E35F68DD80388C0F00F34406$F0F3FF27037AA69F", "1337adminPASS"}, {"$NETLMv2$USER1$1122334455667788$B1D163EA5881504F3963DC50FCDC26C1$EB4D9E8138149E20", "foobar"}, // repeat in exactly the same format that is used in john.pot (lower case hex) {"$NETLMv2$USER1$1122334455667788$b1d163ea5881504f3963dc50fcdc26c1$eb4d9e8138149e20", "foobar"}, {"$NETLMv2$ATEST$1122334455667788$83B59F1536D3321DBF1FAEC14ADB1675$A1E7281FE8C10E53", "SomeFancyP4$$w0rdHere"}, {"", "1337adminPASS", {"administrator", "", "FOODOM", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} }, {"", "foobar", {"user1", "", "", "1122334455667788", "B1D163EA5881504F3963DC50FCDC26C1", "EB4D9E8138149E20"} }, {"", "SomeFancyP4$$w0rdHere", {"aTest", "", "", "1122334455667788", "83B59F1536D3321DBF1FAEC14ADB1675", "A1E7281FE8C10E53"} }, {NULL} }; static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); static uchar (*output)[BINARY_SIZE]; static HMACMD5Context (*saved_ctx); static int keys_prepared; static unsigned char *challenge; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output)); saved_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_ctx)); } static void done(void) { MEM_FREE(saved_ctx); MEM_FREE(output); MEM_FREE(saved_len); MEM_FREE(saved_plain); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *pos2; if (ciphertext == NULL) return 0; else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0; pos = &ciphertext[FORMAT_TAG_LEN]; /* Validate Username and Domain Length */ for (pos2 = pos; *pos2 != '$'; pos2++) if ((unsigned char)*pos2 < 0x20) return 0; if ( !(*pos2 && (pos2 - pos <= USERNAME_LENGTH + DOMAIN_LENGTH)) ) return 0; /* Validate Server Challenge Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) ) return 0; /* Validate LMv2 Response Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0; /* Validate Client Challenge Length */ pos2++; pos = pos2; for (; atoi16[ARCH_INDEX(*pos2)] != 0x7F; pos2++); if (pos2 - pos != CHALLENGE_LENGTH / 2) return 0; if (pos2[0] != '\0') return 0; return 1; } static char *prepare(char *split_fields[10], struct fmt_main *self) { 
char *login = split_fields[0]; char *uid = split_fields[2]; char *srv_challenge = split_fields[3]; char *nethashv2 = split_fields[4]; char *cli_challenge = split_fields[5]; char *identity = NULL, *tmp; if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) return split_fields[1]; if (!login || !uid || !srv_challenge || !nethashv2 || !cli_challenge) return split_fields[1]; /* DOMAIN\USER: -or- USER::DOMAIN: */ if ((tmp = strstr(login, "\\")) != NULL) { identity = (char *) mem_alloc(strlen(login)*2 + 1); strcpy(identity, tmp + 1); /* Upper-Case Username - Not Domain */ enc_strupper(identity); strncat(identity, login, tmp - login); } else { identity = (char *) mem_alloc(strlen(login)*2 + strlen(uid) + 1); strcpy(identity, login); enc_strupper(identity); strcat(identity, uid); } tmp = (char *) mem_alloc(9 + strlen(identity) + 1 + strlen(srv_challenge) + 1 + strlen(nethashv2) + 1 + strlen(cli_challenge) + 1); sprintf(tmp, "%s%s$%s$%s$%s", FORMAT_TAG, identity, srv_challenge, nethashv2, cli_challenge); MEM_FREE(identity); if (valid(tmp, self)) { char *cp = str_alloc_copy(tmp); MEM_FREE(tmp); return cp; } MEM_FREE(tmp); return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TOTAL_LENGTH + 1]; char *pos = NULL; int identity_length = 0; /* Calculate identity length */ for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); memset(out, 0, TOTAL_LENGTH + 1); memcpy(out, ciphertext, strlen(ciphertext)); strlwr(&out[FORMAT_TAG_LEN + identity_length + 1]); /* Exclude: $NETLMv2$USERDOMAIN$ */ return out; } static void *get_binary(char *ciphertext) { static uchar *binary; char *pos = NULL; int i, identity_length; if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); ciphertext += FORMAT_TAG_LEN + identity_length + 1 + CHALLENGE_LENGTH / 2 + 1; for (i=0; i<BINARY_SIZE; i++) { binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4; binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]); } return binary; } /* Calculate the LMv2 response for the given challenge, using the specified authentication identity (username and domain), password and client nonce. 
*/ static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i = 0; #ifdef _OPENMP #pragma omp parallel for for (i = 0; i < count; i++) #endif { unsigned char ntlm_v2_hash[16]; HMACMD5Context ctx; // can't be moved above the OMP pragma if (!keys_prepared) { int len; unsigned char ntlm[16]; /* Generate 16-byte NTLM hash */ len = E_md4hash(saved_plain[i], saved_len[i], ntlm); // We do key setup of the next HMAC_MD5 here (once per salt) hmac_md5_init_K16(ntlm, &saved_ctx[i]); if (len <= 0) saved_plain[i][-len] = 0; // match truncation } /* HMAC-MD5(Username + Domain, NTLM Hash) */ memcpy(&ctx, &saved_ctx[i], sizeof(ctx)); hmac_md5_update(&challenge[17], (int)challenge[16], &ctx); hmac_md5_final(ntlm_v2_hash, &ctx); /* Generate 16-byte non-client nonce portion of LMv2 Response */ /* HMAC-MD5(Challenge + Nonce, NTLMv2 Hash) + Nonce */ hmac_md5(ntlm_v2_hash, challenge, 16, (unsigned char*)output[i]); } keys_prepared = 1; return count; } static int cmp_all(void *binary, int count) { int index; for (index=0; index<count; index++) if (!memcmp(output[index], binary, BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(output[index], binary, BINARY_SIZE); } static int cmp_exact(char *source, int index) { return !memcmp(output[index], get_binary(source), BINARY_SIZE); } /* We're essentially using three salts, but we're going to pack it into a single blob for now. |Client Challenge (8 Bytes)|Server Challenge (8 Bytes)|Unicode(Username (<=20).Domain (<=15)) */ static void *get_salt(char *ciphertext) { static unsigned char *binary_salt; unsigned char identity[USERNAME_LENGTH + DOMAIN_LENGTH + 1]; UTF16 identity_ucs2[USERNAME_LENGTH + DOMAIN_LENGTH + 1]; int i, identity_length; int identity_ucs2_length; char *pos = NULL; if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); memset(binary_salt, 0, SALT_SIZE); /* Calculate identity length */ for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); /* Convert identity (username + domain) string to NT unicode */ strnzcpy((char *)identity, ciphertext + FORMAT_TAG_LEN, sizeof(identity)); identity_ucs2_length = enc_to_utf16((UTF16 *)identity_ucs2, USERNAME_LENGTH + DOMAIN_LENGTH, (UTF8 *)identity, identity_length) * sizeof(int16_t); if (identity_ucs2_length < 0) // Truncated at Unicode conversion. 
identity_ucs2_length = strlen16((UTF16 *)identity_ucs2) * sizeof(int16_t); binary_salt[16] = (unsigned char)identity_ucs2_length; memcpy(&binary_salt[17], (char *)identity_ucs2, identity_ucs2_length); /* Set server challenge */ ciphertext += FORMAT_TAG_LEN + identity_length + 1; for (i = 0; i < 8; i++) binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; /* Set client challenge */ ciphertext += 2 + CHALLENGE_LENGTH / 2 + CIPHERTEXT_LENGTH; for (i = 0; i < 8; ++i) binary_salt[i + 8] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; /* Return a concatenation of the server and client challenges and the identity value */ return (void*)binary_salt; } static void set_salt(void *salt) { challenge = salt; } static void set_key(char *key, int index) { saved_len[index] = strnzcpyn((char*)saved_plain[index], key, sizeof(*saved_plain)); keys_prepared = 0; } static char *get_key(int index) { return (char *)saved_plain[index]; } static int salt_hash(void *salt) { // Hash the client challenge (in case server salt was spoofed) return (*(uint32_t *)salt+8) & (SALT_HASH_SIZE - 1); } #define COMMON_GET_HASH_VAR output #include "common-get-hash.h" struct fmt_main fmt_NETLMv2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
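The header comment and crypt_all above describe the LMv2 chain: HMAC-MD5 keyed with the NTLM hash over the UTF-16LE uppercased username + domain yields the NTLMv2 hash, and HMAC-MD5 of server challenge || client challenge keyed with that hash yields the 16-byte response. A standalone sketch of the same chain, assuming OpenSSL's one-shot HMAC(); the UTF-16LE identity buffer is taken as an input here, since in the plugin that conversion is done by enc_to_utf16 and the NTLM hash by E_md4hash:

#include <string.h>
#include <openssl/hmac.h>
#include <openssl/evp.h>

/* ntlm:     16-byte MD4 of the UTF-16LE password (as E_md4hash produces)
 * identity: UTF-16LE uppercased username followed by the domain
 * srv_chal: 8-byte server challenge; cli_chal: 8-byte client challenge
 * resp_out: 16-byte LMv2 response (the non-client-nonce portion) */
static void lmv2_response(const unsigned char ntlm[16],
                          const unsigned char *identity, size_t identity_len,
                          const unsigned char srv_chal[8],
                          const unsigned char cli_chal[8],
                          unsigned char resp_out[16])
{
    unsigned char v2hash[16], chal[16];
    unsigned int len = 0;

    /* NTLMv2 hash = HMAC-MD5(key = NTLM hash, msg = identity) */
    HMAC(EVP_md5(), ntlm, 16, identity, identity_len, v2hash, &len);

    /* response = HMAC-MD5(key = NTLMv2 hash, msg = srv_chal || cli_chal) */
    memcpy(chal, srv_chal, 8);
    memcpy(chal + 8, cli_chal, 8);
    HMAC(EVP_md5(), v2hash, 16, chal, 16, resp_out, &len);
}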
diagsm_x_csc_n_row.c
#include "alphasparse/opt.h" #include "alphasparse/kernel.h" #include "alphasparse/util.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) {//assume A is square ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(A->rows*sizeof(ALPHA_Number)); memset(diag, '\0', A->rows * sizeof(ALPHA_Number)); ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < A->cols; c++) { for (ALPHA_INT ai = A->cols_start[c]; ai < A->cols_end[c]; ai++) { ALPHA_INT ar = A->row_indx[ai]; if (ar == c) { diag[c] = A->values[ai]; } } } #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT r = 0; r < A->rows; ++r) { for (ALPHA_INT c = 0; c < columns; ++c) { ALPHA_Number t; alpha_mul(t, alpha, x[index2(r, c, ldx)]); alpha_div(y[index2(r, c, ldy)], t, diag[r]); } } alpha_free(diag); return ALPHA_SPARSE_STATUS_SUCCESS; }
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(8*t3+Nx+4,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),32*t4+30),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
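For reference, the tiled CLooG loop nest above computes the same update as this naive variable-coefficient 7-point sweep, double-buffered in the leading dimension of A exactly as in the kernel's (t+1)%2 / t%2 indexing:

/* Naive reference sweep equivalent to the tiled kernel above. */
for (int t = 0; t < Nt - 1; t++) {
  for (int i = 1; i < Nz - 1; i++) {
    for (int j = 1; j < Ny - 1; j++) {
      for (int k = 1; k < Nx - 1; k++) {
        A[(t + 1) % 2][i][j][k] =
              coef[0][i][j][k] * A[t % 2][i    ][j    ][k    ]
            + coef[1][i][j][k] * A[t % 2][i - 1][j    ][k    ]
            + coef[2][i][j][k] * A[t % 2][i    ][j - 1][k    ]
            + coef[3][i][j][k] * A[t % 2][i    ][j    ][k - 1]
            + coef[4][i][j][k] * A[t % 2][i + 1][j    ][k    ]
            + coef[5][i][j][k] * A[t % 2][i    ][j + 1][k    ]
            + coef[6][i][j][k] * A[t % 2][i    ][j    ][k + 1];
      }
    }
  }
}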
serial_tree_learner.h
/*! * Original work Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Modified work Copyright (c) 2020 Fabio Sigrist. All rights reserved. * Licensed under the Apache License Version 2.0 See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #include <LightGBM/dataset.h> #include <LightGBM/tree.h> #include <LightGBM/tree_learner.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/utils/random.h> #include <string> #include <cmath> #include <cstdio> #include <memory> #include <random> #include <vector> #include "data_partition.hpp" #include "feature_histogram.hpp" #include "leaf_splits.hpp" #include "split_info.hpp" #ifdef USE_GPU // Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled. // This is necessary to pin the two arrays in memory and make transferring faster. #include <boost/align/aligned_allocator.hpp> #endif using namespace json11; namespace LightGBM { /*! \brief forward declaration */ class CostEfficientGradientBoosting; /*! * \brief Used for learning a tree by single machine */ class SerialTreeLearner: public TreeLearner { public: friend CostEfficientGradientBoosting; explicit SerialTreeLearner(const Config* config); ~SerialTreeLearner(); void Init(const Dataset* train_data, bool is_constant_hessian) override; void ResetTrainingData(const Dataset* train_data) override; void ResetConfig(const Config* config) override; Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian, const Json& forced_split_json) override; Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override; Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients, const score_t* hessians) override; void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override { data_partition_->SetUsedDataIndices(used_indices, num_data); } void AddPredictionToScore(const Tree* tree, double* out_score) const override { if (tree->num_leaves() <= 1) { return; } CHECK(tree->num_leaves() <= data_partition_->num_leaves()); #pragma omp parallel for schedule(static) for (int i = 0; i < tree->num_leaves(); ++i) { double output = static_cast<double>(tree->LeafOutput(i)); data_size_t cnt_leaf_data = 0; auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data); for (data_size_t j = 0; j < cnt_leaf_data; ++j) { out_score[tmp_idx[j]] += output; } } } void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter, data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override; void GetDataLeafIndices(Tree* tree, int* data_leaf_index) const override; protected: virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level); /*! * \brief Some initial works before training */ virtual void BeforeTrain(); /*! * \brief Some initial works before FindBestSplit */ virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf); virtual void FindBestSplits(); virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract); virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract); /*! * \brief Partition tree and data according best split. * \param tree Current tree, will be splitted on this function. 
* \param best_leaf The index of leaf that will be splitted. * \param left_leaf The index of left leaf after splitted. * \param right_leaf The index of right leaf after splitted. */ virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf); /* Force splits with forced_split_json dict and then return num splits forced.*/ virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf, int* right_leaf, int* cur_depth, bool *aborted_last_force_split); /*! * \brief Get the number of data in a leaf * \param leaf_idx The index of leaf * \return The number of data in the leaf_idx leaf */ inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const; /*! \brief number of data */ data_size_t num_data_; /*! \brief number of features */ int num_features_; /*! \brief training data */ const Dataset* train_data_; /*! \brief gradients of current iteration */ const score_t* gradients_; /*! \brief hessians of current iteration */ const score_t* hessians_; /*! \brief training data partition on leaves */ std::unique_ptr<DataPartition> data_partition_; /*! \brief used for generate used features */ Random random_; /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */ std::vector<int8_t> is_feature_used_; /*! \brief used feature indices in current tree */ std::vector<int> used_feature_indices_; /*! \brief pointer to histograms array of parent of current leaves */ FeatureHistogram* parent_leaf_histogram_array_; /*! \brief pointer to histograms array of smaller leaf */ FeatureHistogram* smaller_leaf_histogram_array_; /*! \brief pointer to histograms array of larger leaf */ FeatureHistogram* larger_leaf_histogram_array_; /*! \brief store best split points for all leaves */ std::vector<SplitInfo> best_split_per_leaf_; /*! \brief store best split per feature for all leaves */ std::vector<SplitInfo> splits_per_leaf_; /*! \brief stores best thresholds for all feature for smaller leaf */ std::unique_ptr<LeafSplits> smaller_leaf_splits_; /*! \brief stores best thresholds for all feature for larger leaf */ std::unique_ptr<LeafSplits> larger_leaf_splits_; std::vector<int> valid_feature_indices_; #ifdef USE_GPU /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */ std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_; /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */ std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_; #else /*! \brief gradients of current iteration, ordered for cache optimized */ std::vector<score_t> ordered_gradients_; /*! \brief hessians of current iteration, ordered for cache optimized */ std::vector<score_t> ordered_hessians_; #endif /*! \brief Store ordered bin */ std::vector<std::unique_ptr<OrderedBin>> ordered_bins_; /*! \brief True if has ordered bin */ bool has_ordered_bin_ = false; /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */ std::vector<char> is_data_in_leaf_; /*! \brief used to cache historical histogram to speed up*/ HistogramPool histogram_pool_; /*! 
\brief config of tree learner*/ const Config* config_; int num_threads_; std::vector<int> ordered_bin_indices_; bool is_constant_hessian_; std::unique_ptr<CostEfficientGradientBoosting> cegb_; }; inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const { if (leaf_idx >= 0) { return data_partition_->leaf_count(leaf_idx); } else { return 0; } } } // namespace LightGBM #endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
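ConstructHistograms takes a use_subtract flag because sibling histograms are complementary: the histogram of the larger leaf equals the parent's histogram minus the freshly built histogram of the smaller leaf, so only the smaller side's data is ever scanned. A sketch of that identity on a single feature; the bin layout here is hypothetical, standing in for LightGBM's FeatureHistogram per-bin gradient/hessian sums:

struct BinEntry { double sum_gradient; double sum_hessian; int count; };

// larger = parent - smaller, bin by bin; avoids rescanning the larger
// leaf's (by definition more numerous) data rows.
static void SubtractHistogram(const BinEntry* parent, const BinEntry* smaller,
                              BinEntry* larger, int num_bins) {
  for (int b = 0; b < num_bins; ++b) {
    larger[b].sum_gradient = parent[b].sum_gradient - smaller[b].sum_gradient;
    larger[b].sum_hessian  = parent[b].sum_hessian  - smaller[b].sum_hessian;
    larger[b].count        = parent[b].count        - smaller[b].count;
  }
}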
GB_unaryop__abs_uint8_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint8_fp32 // op(A') function: GB_tran__abs_uint8_fp32 // C type: uint8_t // A type: float // cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint8_fp32 ( uint8_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint8_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
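After macro expansion, the apply kernel above is a flat parallel cast-and-op loop; stripped of the GraphBLAS plumbing it is equivalent to the sketch below. Note that the generated op is cij = aij, so for an unsigned result type ABS reduces to the typecast itself. The NaN and range handling shown is an assumption about GB_CAST_UNSIGNED's contract; the authoritative rules are whatever GB.h defines.

#include <math.h>
#include <stdint.h>

/* Hypothetical stand-in for GB_CAST_UNSIGNED(z,x,8): assumes NaN -> 0
 * and saturation at the type bounds. */
static inline uint8_t cast_fp32_to_uint8(float x)
{
    if (isnan(x) || x <= 0.0f) return 0;
    if (x >= 255.0f) return 255;
    return (uint8_t) x;
}

/* Equivalent of GB_unop__abs_uint8_fp32's expanded apply loop. */
static void abs_uint8_from_fp32(uint8_t *Cx, const float *Ax, int64_t anz,
                                int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        Cx [p] = cast_fp32_to_uint8 (Ax [p]) ;
    }
}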
omp-axpy.c
//
// omp-axpy.c
//
//
// Created by Yaying Shi on 10/2/19.
//
#include <stdio.h>
#include <stdlib.h>
#include "omp-axpy.h"

void axpy(int N, float *Y, float *X, float a)
{
    int i;
    // X is read-only on the device; Y is copied in and back out.
    #pragma omp target map(to:X[0:N]) map(tofrom:Y[0:N])
    #pragma omp parallel for
    for (i = 0; i < N; ++i){
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n", X[i], Y[i]);
    }
}

int main(int argc, char *argv[]){
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++){
        Y[i] = (((float)rand()/(float)(10)) * x);
        X[i] = (((float)rand()/(float)(10)) * x);
        printf("this is Y: %f\n", Y[i]);
    }
    float a = 0.5;
    axpy(N, &Y[0], &X[0], a);
    return 0;
}
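Because Y is mapped tofrom while X is mapped to only, the result can be validated on the host after the target region returns. A minimal host-side check, with hypothetical names; Y0 would be a copy of Y saved before the offloaded call:

#include <math.h>

/* Compare the offloaded result against a host-side recomputation. */
static int check_axpy(int N, const float *Y, const float *Y0,
                      const float *X, float a)
{
    for (int i = 0; i < N; i++)
        if (fabsf(Y[i] - (Y0[i] + a * X[i])) > 1e-4f)
            return 0;   /* mismatch */
    return 1;           /* offloaded result matches the host reference */
}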
GB_unop__identity_fc32_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc32_int8) // op(A') function: GB (_unop_tran__identity_fc32_int8) // C type: GxB_FC32_t // A type: int8_t // cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc32_int8) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc32_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
crivello_eratostene-seriale-main-gettime.c
/* compilation:
 * gcc crivello_eratostene.c -fopenmp -lgmp -o crivello_eratostene
 */
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <gmp.h>
#include <sys/time.h>

/* Half-sieve over the odd numbers: sieve[k] == 1 means 2k+1 is (still
 * considered) prime. */
void eratosthenes_sieve(int * sieve, long unsigned n)
{
    long unsigned i;
    for(i = 0; i <= n/2; ++i)
        sieve[i] = 1;

    //#pragma omp parallel for schedule(dynamic)
    for(i = 3; i <= n; i += 2) {
        if(i*i > n)
            i = n;
        if(sieve[i/2] == 1) {
            long unsigned j;
            /* step j by 2 so that i*j stays odd and maps onto a valid
             * half-sieve index */
            for(j = i; j <= n/i; j += 2)
                sieve[(i*j)/2] = 0;
        }
    }
}

int main(int argc, char * argv[])
{
    long unsigned n = atoll(argv[1]);
    /* n/2 + 1 entries: the init loop above touches index n/2 */
    int * sieve = (int *) malloc(sizeof(int) * (n/2 + 1));

#ifdef TIME
    struct timeval tempo;
    gettimeofday(&tempo,0);
    double t1=tempo.tv_sec+(tempo.tv_usec/1000000.0);
#endif

    eratosthenes_sieve(sieve, n);

#ifdef TIME
    gettimeofday(&tempo,0);
    double t2=tempo.tv_sec+(tempo.tv_usec/1000000.0);
    printf("%lu %.6f\n", n, t2-t1);
#endif

#ifdef VERBOSE
    printf("#");
    if(n >= 2)
        printf("2 ");
    long unsigned i;
    for(i = 3; i <= n; i += 2)
        if(sieve[i/2] == 1)
            printf("%lu ", i);
    printf("\n");
#endif

    //free(sieve);
    return 0;
}
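With the half-sieve convention above (index k stands for the odd number 2k+1), a quick sanity check is to count the surviving entries and compare against known values of the prime-counting function: pi(100) = 25, pi(1000) = 168, pi(10^6) = 78498.

/* Count primes from the half-sieve: 2 plus every odd i <= n with
 * sieve[i/2] still set. */
long unsigned count_primes(const int *sieve, long unsigned n)
{
    long unsigned count = (n >= 2) ? 1 : 0;  /* the prime 2 */
    for (long unsigned i = 3; i <= n; i += 2)
        if (sieve[i/2] == 1)
            count++;
    return count;  /* expect 25 for n = 100, 168 for n = 1000 */
}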
GB_unop__identity_int64_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int64_int32) // op(A') function: GB (_unop_tran__identity_int64_int32) // C type: int64_t // A type: int32_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int64_t z = (int64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = (int64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int64_int32) ( int64_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int64_t z = (int64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int64_t z = (int64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int64_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__ainv_uint16_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint16_fp64 // op(A') function: GB_tran__ainv_uint16_fp64 // C type: uint16_t // A type: double // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = -aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint16_fp64 ( uint16_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint16_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mixed_tentusscher_myo_epi_2004_S2_17.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_17.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5285006584511,0.00130106729313035,0.778730090563051,0.778532170509002,0.000175864034699588,0.484676327494511,0.00294864118836231,0.999998334805594,1.94635926887894e-08,1.90111810990968e-05,0.999770708859905,1.00748136518757,0.999998809936904,3.60224813237435e-05,1.18254991511234,9.21308723760909,140.066635187809}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* 
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.7219011711698,0.000373800660274715,0.000150569617335446,0.000654485626385041,0.257379206595380,0.173802542474158,0.132458241657246,3.93296187661537,0.0158924919170214,2.50168625879054,1095.95864752453,0.000511327811652900,0.243193135425503,0.0192821673745436,0.00636346797017134,9.00104876078144e-06}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
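/* Editor's sketch (not part of the model above): the gate updates
   rDY_[n] = Y_INF - (Y_INF - y) * exp(-dt/TAU) used in both RHS functions are
   the Rush-Larsen scheme, which integrates dy/dt = (y_inf - y)/tau exactly
   when y_inf and tau are frozen over the step. The toy values below are
   assumptions chosen to show why this matters for stiff gates: with
   dt > 2*tau, forward Euler diverges while Rush-Larsen stays on the exact
   trajectory. */
#include <math.h>
#include <stdio.h>

static double rush_larsen(double y, double y_inf, double tau, double dt) {
    return y_inf - (y_inf - y) * exp(-dt / tau);
}

static double forward_euler(double y, double y_inf, double tau, double dt) {
    return y + dt * (y_inf - y) / tau;
}

int main(void) {
    double y_rl = 0.0, y_fe = 0.0;
    const double y_inf = 1.0, tau = 0.1, dt = 0.25; /* dt/tau = 2.5: stiff for Euler */
    for (int n = 0; n < 5; n++) {
        y_rl = rush_larsen(y_rl, y_inf, tau, dt);
        y_fe = forward_euler(y_fe, y_inf, tau, dt);
    }
    /* exact solution of dy/dt = (1 - y)/tau from y(0) = 0 after 5 steps */
    printf("Rush-Larsen: %g  forward Euler: %g  exact: %g\n",
           y_rl, y_fe, y_inf - exp(-5.0 * dt / tau));
    return 0;
}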
GB_unop__lnot_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__lnot_fp32_fp32 // op(A') function: GB_unop_tran__lnot_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = !(z != 0) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__lnot_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = !(z != 0) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__lnot_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
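/* Editor's note: a usage sketch for the generated kernel above. GrB_apply on
 * a GrB_FP32 matrix with SuiteSparse's float logical-not operator is what
 * dispatches here; the operator name GxB_LNOT_FP32 is my recollection of the
 * GxB_* extension naming and should be checked against GraphBLAS.h.
 *
 *     GrB_Matrix A, C ;   // both GrB_FP32, same dimensions
 *     GrB_Matrix_apply (C, NULL, NULL, GxB_LNOT_FP32, A, NULL) ;
 *     // every stored entry of C is now !(aij != 0), i.e. 0.0f or 1.0f
 */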
MeshUtilities.h
/* * MeshUtilities.h * Copyright (C) 2020 by MegaMol Team * All rights reserved. */ #pragma once #include "glm/glm.hpp" #include "igl/boundary_loop.h" #include "igl/decimate.h" #include "igl/lscm.h" #include "mesh/MeshCalls.h" #include "nanoflann.hpp" #include <algorithm> #include <cmath> #include <functional> #include <random> namespace megamol { namespace probe { inline float coulomb_force(float r) { return (1.0f / pow(r, 2)); } // stackoverflow.com/questions/2550229/how-to-keep-only-duplicates-efficiently template<class I, class P> I remove_unique(I first, I last, P pred = P()) { I dest = first; while ((first = std::adjacent_find(first, last, pred)) != last) { *dest = *first; ++first; ++dest; if ((first = std::adjacent_find(first, last, std::not_fn(pred))) == last) break; ++first; } return dest; } template<class I> I remove_unique(I first, I last) { return remove_unique(first, last, std::equal_to<typename std::iterator_traits<I>::value_type>()); } template<typename Derived> struct MeshAdaptor { typedef float coord_t; const Derived obj; //!< The data source, held by value size_t point_count = 0; /// The constructor that sets the data set source MeshAdaptor(const Derived& obj_) : obj(obj_) { auto bs = derived()->byte_size; auto ts = mesh::MeshDataAccessCollection::getByteSize(derived()->component_type); auto tc = derived()->component_cnt; point_count = static_cast<size_t>(bs / (ts * tc)); } /// CRTP helper method inline const Derived& derived() const { return obj; } // Must return the number of data points inline size_t kdtree_get_point_count() const { return point_count; } // Returns the dim'th component of the idx'th point in the class: // Since this is inlined and the "dim" argument is typically an immediate value, the // "if/else's" are actually solved at compile time. inline coord_t kdtree_get_pt(const size_t idx, const size_t dim) const { if (dim == 0) return derived()->data[mesh::MeshDataAccessCollection::getByteSize(derived()->component_type) * (idx + 0)]; else if (dim == 1) return derived()->data[mesh::MeshDataAccessCollection::getByteSize(derived()->component_type) * (idx + 1)]; else return derived()->data[mesh::MeshDataAccessCollection::getByteSize(derived()->component_type) * (idx + 2)]; } // Optional bounding-box computation: return false to default to a standard bbox computation loop. // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it // again. Look at bb.size() to find out the expected dimensionality (e.g. 
2 or 3 for point clouds) template<class BBOX> bool kdtree_get_bbox(BBOX& /*bb*/) const { return false; } }; class MeshUtility { public: void inputData(const mesh::MeshDataAccessCollection::Mesh& mesh_ptr) { this->_mesh = mesh_ptr; for (int i = 0; i < this->_mesh.attributes.size(); ++i) { if (this->_mesh.attributes[i].semantic == mesh::MeshDataAccessCollection::AttributeSemanticType::POSITION) { this->_pos_attribute_idx = i; _va_ptr = &this->_mesh.attributes[i]; } else if (this->_mesh.attributes[i].semantic == mesh::MeshDataAccessCollection::AttributeSemanticType::NORMAL) { this->_normal_attribute_idx = i; } } _mesh_indices = this->_mesh.indices; this->convertToEigenMatrices(); this->createOrthonormalBasis(); // this->buildKDTree(_va_ptr); } bool convertToMesh() { _mesh_attribs.resize(1); _mesh_attribs[0].component_type = mesh::MeshDataAccessCollection::ValueType::FLOAT; _mesh_attribs[0].byte_size = _mesh_vertices.size() * sizeof(float); _mesh_attribs[0].component_cnt = 3; _mesh_attribs[0].stride = 3 * sizeof(float); _mesh_attribs[0].offset = 0; _mesh_attribs[0].data = reinterpret_cast<uint8_t*>(_mesh_vertices.data()); _mesh_attribs[0].semantic = mesh::MeshDataAccessCollection::POSITION; //_mesh_attribs[1].component_type = mesh::MeshDataAccessCollection::ValueType::FLOAT; //_mesh_attribs[1].byte_size = _normals.size() * sizeof(std::array<float, 3>); //_mesh_attribs[1].component_cnt = 3; //_mesh_attribs[1].stride = sizeof(std::array<float, 3>); //_mesh_attribs[1].offset = 0; //_mesh_attribs[1].data = reinterpret_cast<uint8_t*>(_normals.data()); //_mesh_attribs[1].semantic = mesh::MeshDataAccessCollection::NORMAL; _mesh_indices.type = mesh::MeshDataAccessCollection::ValueType::UNSIGNED_INT; _mesh_indices.byte_size = _mesh_faces.size() * sizeof(uint32_t); _mesh_indices.data = reinterpret_cast<uint8_t*>(_mesh_faces.data()); return true; } uint32_t getNumTotalFaces() { return _faces.rows(); } float calcTriangleArea(uint32_t idx) { if (idx >= this->_faces.rows()) { megamol::core::utility::log::Log::DefaultLog.WriteError("[MeshUtility] Id is out of range"); return -1; } Eigen::Matrix<int, 3, 2> other_indices; //other_indices << 1, 0, 0, 2, 2, 1; other_indices << 1, 2, 0, 2, 0, 1; auto vertices = this->getTriangleVertices(idx); std::array<glm::vec3, 3> edges = { vertices[1] - vertices[0], vertices[2] - vertices[1], vertices[0] - vertices[2]}; std::array<float, 3> edge_lengths = {glm::length(edges[0]), glm::length(edges[1]), glm::length(edges[2])}; int longest_edge = std::distance(edge_lengths.begin(), std::max_element(edge_lengths.begin(), edge_lengths.end())); float proj_on_longest_edge = glm::dot(glm::normalize(edges[longest_edge]), glm::normalize(edges[other_indices(longest_edge, 0)])); auto to_cut = edges[longest_edge] * std::abs(proj_on_longest_edge); auto e = to_cut + vertices[longest_edge]; auto e_o1 = e - vertices[other_indices(longest_edge, 0)]; auto e_o2 = e - vertices[other_indices(longest_edge, 1)]; auto area = (glm::length(to_cut) * glm::length(e_o1)) / 2.0f + (glm::length(e_o1) * glm::length(e_o2)) / 2.0f; return area; } std::vector<uint32_t> getNeighboringTriangles(uint32_t idx, int min_common_points = 2) { // std::vector<uint32_t> result; // for (uint32_t i = 0; i < this->_faces.rows(); ++i) { // int common_points = 0; // for (int j = 0; j < this->_faces.cols(); ++j) { // for (int k = 0; k < this->_faces.cols(); ++k) { // if (this->_faces(idx, k) == this->_faces(i, j)) common_points++; // } // } // if (common_points >= min_common_points) { // result.emplace_back(i); // } //} // 
return result; std::vector<uint32_t> neighboring_triangles; std::array<uint32_t, 3> indices; indices[0] = this->_faces(idx, 0); indices[1] = this->_faces(idx, 1); indices[2] = this->_faces(idx, 2); for (auto index : indices) { auto it = _std_faces.begin(); while (it != _std_faces.end()) { it = std::find(it, _std_faces.end(), index); if (it != _std_faces.end()) { neighboring_triangles.emplace_back(std::distance(_std_faces.begin(), it) / 3); ++it; } } } //std::sort(neighboring_triangles.begin(), neighboring_triangles.end()); //auto last = std::unique(neighboring_triangles.begin(), neighboring_triangles.end()); //neighboring_triangles.erase(last, neighboring_triangles.end()); std::sort(neighboring_triangles.begin(), neighboring_triangles.end()); auto rmv = remove_unique(neighboring_triangles.begin(), neighboring_triangles.end()); neighboring_triangles.erase(rmv, neighboring_triangles.end()); return neighboring_triangles; } float getLongestEdgeLength(uint32_t idx) { glm::vec3 v0 = {_vertices(_faces(idx, 0), 0), _vertices(_faces(idx, 0), 1), _vertices(_faces(idx, 0), 2)}; glm::vec3 v1 = {_vertices(_faces(idx, 1), 0), _vertices(_faces(idx, 1), 1), _vertices(_faces(idx, 1), 2)}; glm::vec3 v2 = {_vertices(_faces(idx, 2), 0), _vertices(_faces(idx, 2), 1), _vertices(_faces(idx, 2), 2)}; auto e0 = v0 - v1; auto e1 = v0 - v2; auto e2 = v1 - v2; return std::max(std::max(glm::length(e0), glm::length(e1)), glm::length(e2)); } void UVMapping(const Eigen::MatrixXi& faces, const Eigen::MatrixXd& vertices, Eigen::MatrixXd& vertices_uv) { // Fix two points on the boundary Eigen::VectorXi bnd, b(2, 1); igl::boundary_loop(faces, bnd); b(0) = bnd(0); b(1) = bnd(round(bnd.size() / 2)); Eigen::MatrixXd bc(2, 2); bc << 0, 0, 1, 0; igl::lscm(vertices, faces, b, bc, vertices_uv); } bool orthogonalProjection(uint32_t idx, const Eigen::MatrixXd& points, Eigen::MatrixXd& projected_points) { glm::vec3 u; glm::vec3 v; for (int i = 0; i < 3; ++i) { u[i] = _vertices(_faces(idx, 1), i) - _vertices(_faces(idx, 0), i); v[i] = _vertices(_faces(idx, 2), i) - _vertices(_faces(idx, 0), i); } projected_points.resizeLike(points); for (int i = 0; i < points.rows(); ++i) { glm::vec3 point; for (int j = 0; j < 3; ++j) { point[j] = points(i, j); } auto projection = (glm::dot(point, u) / glm::dot(u, u)) * u + (glm::dot(point, v) / glm::dot(v, v)) * v; for (int j = 0; j < 3; ++j) { projected_points(i, j) = projection[j]; } } return true; } // Performs an orthonormal transformation into the basis of triangle idx // returns a 3D matrix where the n-dimension (3rd entry) can be omitted to get the 2D representation bool perform2Dprojection(uint32_t idx, const Eigen::MatrixXd& points, Eigen::MatrixXd& projected_points) { projected_points.resize(points.rows(), 3); // now solve the system to get the points in the new basis Eigen::Matrix3d A; A << _orthonormalBasis[idx][0].x, _orthonormalBasis[idx][1].x, _orthonormalBasis[idx][2].x, _orthonormalBasis[idx][0].y, _orthonormalBasis[idx][1].y, _orthonormalBasis[idx][2].y, _orthonormalBasis[idx][0].z, _orthonormalBasis[idx][1].z, _orthonormalBasis[idx][2].z; // A << u.x, u.y, u.z, v.x, v.y, v.z, n.x, n.y, n.z; for (int i = 0; i < points.rows(); ++i) { Eigen::Vector3d b; b << points(i, 0), points(i, 1), points(i, 2); auto proj_point = A.colPivHouseholderQr().solve(b); for (int j = 0; j < projected_points.cols(); ++j) { projected_points(i, j) = proj_point[j]; } } return true; } bool performInverse2Dprojection(uint32_t idx, const Eigen::MatrixXd& points, Eigen::MatrixXd& projected_points) { 
projected_points.resize(points.rows(), 3); #pragma omp parallel for for (int i = 0; i < points.rows(); ++i) { for (int j = 0; j < projected_points.cols(); ++j) { projected_points(i, j) = points(i, 0) * _orthonormalBasis[idx][0][j] + points(i, 1) * _orthonormalBasis[idx][1][j] + points(i, 2) * _orthonormalBasis[idx][2][j]; } } return true; } std::vector<glm::vec3> seedPoints(const uint32_t idx, const int num_pts) { std::mt19937 rnd; rnd.seed(std::random_device()()); // rnd.seed(666); std::uniform_real_distribution<float> fltdist(0, 1); // get triangle vertices auto v0_idx = this->_faces(idx, 0); auto v1_idx = this->_faces(idx, 1); auto v2_idx = this->_faces(idx, 2); std::vector<glm::vec3> seededPoints(num_pts); for (int i = 0; i < num_pts; ++i) { // $P = (1 - \sqrt{r_1}) A + (\sqrt{r_1}(1 - r_2)) B + (r_2 \sqrt{r_1})C$ auto rnd_u = fltdist(rnd); auto rnd_v = fltdist(rnd); glm::vec3 point; point[0] = (1 - std::sqrt(rnd_u)) * this->_vertices(v0_idx, 0) + (std::sqrt(rnd_u) * (1 - rnd_v)) * this->_vertices(v1_idx, 0) + (rnd_v * std::sqrt(rnd_u)) * this->_vertices(v2_idx, 0); point[1] = (1 - std::sqrt(rnd_u)) * this->_vertices(v0_idx, 1) + (std::sqrt(rnd_u) * (1 - rnd_v)) * this->_vertices(v1_idx, 1) + (rnd_v * std::sqrt(rnd_u)) * this->_vertices(v2_idx, 1); point[2] = (1 - std::sqrt(rnd_u)) * this->_vertices(v0_idx, 2) + (std::sqrt(rnd_u) * (1 - rnd_v)) * this->_vertices(v1_idx, 2) + (rnd_v * std::sqrt(rnd_u)) * this->_vertices(v2_idx, 2); seededPoints[i] = point; } return seededPoints; } bool seedPoints(const uint32_t idx, const int num_pts, Eigen::MatrixXd& result) { std::mt19937 rnd; rnd.seed(std::random_device()()); // rnd.seed(666); std::uniform_real_distribution<float> fltdist(0, 1); // get triangle vertices auto v0_idx = this->_faces(idx, 0); auto v1_idx = this->_faces(idx, 1); auto v2_idx = this->_faces(idx, 2); auto v0_0 = this->_vertices(v0_idx, 0); auto v0_1 = this->_vertices(v0_idx, 1); auto v0_2 = this->_vertices(v0_idx, 2); auto v1_0 = this->_vertices(v1_idx, 0); auto v1_1 = this->_vertices(v1_idx, 1); auto v1_2 = this->_vertices(v1_idx, 2); auto v2_0 = this->_vertices(v2_idx, 0); auto v2_1 = this->_vertices(v2_idx, 1); auto v2_2 = this->_vertices(v2_idx, 2); result.resize(num_pts, 3); for (int i = 0; i < num_pts; ++i) { // $P = (1 - \sqrt{r_1}) A + (\sqrt{r_1}(1 - r_2)) B + (r_2 \sqrt{r_1})C$ auto rnd_u = fltdist(rnd); auto rnd_v = fltdist(rnd); glm::vec3 result_vec; for (int j = 0; j < result.cols(); ++j) { //result(i, j) = this->_vertices(v0_idx, j) + // rnd_u * (this->_vertices(v1_idx, j) - this->_vertices(v0_idx, j)) + rnd_v * // (this->_vertices(v2_idx, j) - this->_vertices(v0_idx, j)); result(i, j) = (1 - std::sqrt(rnd_u)) * this->_vertices(v0_idx, j) + (std::sqrt(rnd_u) * (1 - rnd_v)) * this->_vertices(v1_idx, j) + (rnd_v * std::sqrt(rnd_u)) * this->_vertices(v2_idx, j); result_vec[j] = result(i, j); } } return true; } std::array<glm::vec3, 3> getTriangleVertices(const uint32_t idx) { std::array<glm::vec3, 3> verts; for (int i = 0; i < 3; ++i) { glm::vec3 vert; for (int j = 0; j < 3; ++j) { vert[j] = this->_vertices(this->_faces(idx, i), j); } verts[i] = vert; } return verts; } bool getTriangleVertices(const uint32_t idx, Eigen::Matrix3d& verts) { for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { verts(i, j) = this->_vertices(this->_faces(idx, i), j); } } return true; } bool pointInTriangle(const uint32_t idx, glm::vec3 p) { auto vertices = this->getTriangleVertices(idx); // Compute vectors auto v0 = vertices[2] - vertices[0]; auto v1 = vertices[1] - vertices[0]; 
auto v2 = p - vertices[0]; // Compute dot products auto dot00 = glm::dot(v0, v0); auto dot01 = glm::dot(v0, v1); auto dot02 = glm::dot(v0, v2); auto dot11 = glm::dot(v1, v1); auto dot12 = glm::dot(v1, v2); // Compute barycentric coordinates auto invDenom = 1 / (dot00 * dot11 - dot01 * dot01); auto u = (dot11 * dot02 - dot01 * dot12) * invDenom; auto v = (dot00 * dot12 - dot01 * dot02) * invDenom; // Check if point is in triangle bool res = (u > 0) && (v > 0) && (u + v < 1); if (res) return true; return false; } bool pointInTriangle(const std::array<glm::vec3, 3>& vertices, glm::vec3 p) { // Compute vectors auto v0 = vertices[2] - vertices[0]; auto v1 = vertices[1] - vertices[0]; auto v2 = p - vertices[0]; // Compute dot products auto dot00 = glm::dot(v0, v0); auto dot01 = glm::dot(v0, v1); auto dot02 = glm::dot(v0, v2); auto dot11 = glm::dot(v1, v1); auto dot12 = glm::dot(v1, v2); // Compute barycentric coordinates auto invDenom = 1 / (dot00 * dot11 - dot01 * dot01); auto u = (dot11 * dot02 - dot01 * dot12) * invDenom; auto v = (dot00 * dot12 - dot01 * dot02) * invDenom; // Check if point is in triangle bool res = (u > 0) && (v > 0) && (u + v < 1); if (res) return true; return false; } bool pointInTriangle(const std::array<glm::vec2, 3>& vertices, glm::vec2 p) { const auto signed_area = 0.5 * (-vertices[1].y * vertices[2].x + vertices[0].y * (-vertices[1].x + vertices[2].x) + vertices[0].x * (vertices[1].y - vertices[2].y) + vertices[1].x * vertices[2].y); const auto s = 1 / (2 * signed_area) * (vertices[0].y * vertices[2].x - vertices[0].x * vertices[2].y + (vertices[2].y - vertices[0].y) * p.x + (vertices[0].x - vertices[2].x) * p.y); const auto t = 1 / (2 * signed_area) * (vertices[0].x * vertices[1].y - vertices[0].y * vertices[1].x + (vertices[0].y - vertices[1].y) * p.x + (vertices[1].x - vertices[0].x) * p.y); return (s > 0 && t > 0 && 1 - s - t > 0); //const auto d1 = this->sign(p, vertices[0], vertices[1]); //const auto d2 = this->sign(p, vertices[1], vertices[2]); //const auto d3 = this->sign(p, vertices[2], vertices[0]); //const auto has_neg = (d1 < 0) || (d2 < 0) || (d3 < 0); //const auto has_pos = (d1 > 0) || (d2 > 0) || (d3 > 0); //return !(has_neg && has_pos); } bool getPatch(uint32_t idx, Eigen::MatrixXd& out_verts, Eigen::MatrixXi& out_indices, const std::vector<uint32_t>& neighbors_ = std::vector<uint32_t>()) { std::vector<uint32_t> neighbors; // check for triangles with two common vertices if (neighbors_.empty()) { neighbors = this->getNeighboringTriangles(idx); } else { neighbors = neighbors_; } assert(neighbors.size() > 3); // auto idx_it = std::find(neighbors.begin(), neighbors.end(), idx); // neighbors.erase(idx_it); out_indices.resize(neighbors.size(), 3); std::vector<std::pair<uint32_t, uint32_t>> index_mapper(3); // put the main triangle in the index mapper index_mapper[0] = std::make_pair(0, this->_faces(idx, 0)); index_mapper[1] = std::make_pair(1, this->_faces(idx, 1)); index_mapper[2] = std::make_pair(2, this->_faces(idx, 2)); // find the other indices for (int i = 1; i < neighbors.size(); ++i) { for (int j = 0; j < this->_faces.cols(); ++j) { std::vector<int> check_index; for (int k = 0; k < index_mapper.size(); ++k) { if (this->_faces(neighbors[i], j) == index_mapper[k].second) { check_index.emplace_back(k); } } if (check_index.empty()) { // make sure that there are no double indices bool fill = true; for (int n = 0; n < index_mapper.size(); ++n) { if (this->_faces(neighbors[i], j) == index_mapper[n].second) { fill = false; } } if (fill) { uint32_t id = 
index_mapper.size(); index_mapper.emplace_back(id, this->_faces(neighbors[i], j)); } } } } out_verts.resize(index_mapper.size(), 3); // fill the out vertices for (int i = 0; i < index_mapper.size(); ++i) { for (int j = 0; j < 3; ++j) { out_verts(index_mapper[i].first, j) = this->_vertices(index_mapper[i].second, j); } } // fill the out indices for (int i = 0; i < neighbors.size(); ++i) { for (int j = 0; j < this->_faces.cols(); ++j) { for (int k = 0; k < index_mapper.size(); ++k) { if (this->_faces(neighbors[i], j) == index_mapper[k].second) { out_indices(i, j) = static_cast<int>(index_mapper[k].first); } } } } return true; } void fillMeshVertices(const Eigen::MatrixXd& in_vertices, std::vector<float>& out_vertices) { out_vertices.resize(in_vertices.rows() * 3); #pragma omp parallel for for (int i = 0; i < in_vertices.rows(); ++i) { for (int j = 0; j < in_vertices.cols(); ++j) { out_vertices[3 * i + j] = static_cast<float>(in_vertices(i, j)); } if (_vertices.cols() < 3) { out_vertices[3 * i + 2] = 0; } } } void fillMeshFaces(const Eigen::MatrixXi& in_faces, std::vector<uint32_t>& out_faces) { out_faces.resize(in_faces.rows() * in_faces.cols()); #pragma omp parallel for for (int i = 0; i < in_faces.rows(); ++i) { for (int j = 0; j < in_faces.cols(); ++j) { out_faces[in_faces.cols() * i + j] = static_cast<uint32_t>(in_faces(i, j)); } } } private: int nearestKSearch(const std::array<float, 3>& point, int k, std::vector<uint32_t>& k_indices, std::vector<float>& k_distances) const { k_indices.resize(k); k_distances.resize(k); std::vector<float> query(3); nanoflann::KNNResultSet<float, uint32_t, int> resultSet(k); resultSet.init(k_indices.data(), k_distances.data()); this->_kd_tree->findNeighbors(resultSet, point.data(), nanoflann::SearchParams(10)); return (k); } void buildKDTree(const mesh::MeshDataAccessCollection::VertexAttribute* pos_attr) { if (this->_mesh.attributes[_pos_attribute_idx].data == nullptr) { megamol::core::utility::log::Log::DefaultLog.WriteError( "[MeshUtility] Cannot construct KD Tree. 
No mesh set."); return; } this->_kd_tree = std::make_shared<NanoFlannIndex>(3, pos_attr, ::nanoflann::KDTreeSingleIndexAdaptorParams(15)); this->_kd_tree->buildIndex(); } template<typename T> void fillVertexMatrix(const T vert_data) { #pragma omp parallel for for (int i = 0; i < _vertices.rows(); ++i) { _vertices(i, 0) = static_cast<double>(vert_data[this->_mesh.attributes[_pos_attribute_idx].component_cnt * i + 0]); _vertices(i, 1) = static_cast<double>(vert_data[this->_mesh.attributes[_pos_attribute_idx].component_cnt * i + 1]); _vertices(i, 2) = static_cast<double>(vert_data[this->_mesh.attributes[_pos_attribute_idx].component_cnt * i + 2]); } } template<typename T> void fillNormalMatrix(const T vert_data) { #pragma omp parallel for for (int i = 0; i < _normals.rows(); ++i) { _normals(i, 0) = static_cast<double>(vert_data[this->_mesh.attributes[_normal_attribute_idx].component_cnt * i + 0]); _normals(i, 1) = static_cast<double>(vert_data[this->_mesh.attributes[_normal_attribute_idx].component_cnt * i + 1]); _normals(i, 2) = static_cast<double>(vert_data[this->_mesh.attributes[_normal_attribute_idx].component_cnt * i + 2]); } } template<typename T> void fillFaceMatrix(const T faces) { #pragma omp parallel for for (int j = 0; j < _faces.rows(); ++j) { _faces(j, 0) = static_cast<int>(faces[3 * j + 0]); _faces(j, 1) = static_cast<int>(faces[3 * j + 1]); _faces(j, 2) = static_cast<int>(faces[3 * j + 2]); } } bool convertToEigenMatrices() { _vertices.resize(this->_mesh.attributes[_pos_attribute_idx].byte_size / (this->_mesh.attributes[_pos_attribute_idx].component_cnt * mesh::MeshDataAccessCollection::getByteSize( this->_mesh.attributes[_pos_attribute_idx].component_type)), this->_mesh.attributes[_pos_attribute_idx].component_cnt); const auto indices = this->_mesh.indices; _faces.resize(indices.byte_size / (3 * mesh::MeshDataAccessCollection::getByteSize(indices.type)), 3); switch (indices.type) { case mesh::MeshDataAccessCollection::UNSIGNED_SHORT: { auto face_data = reinterpret_cast<unsigned short*>(indices.data); this->fillFaceMatrix(face_data); } break; case mesh::MeshDataAccessCollection::UNSIGNED_INT: { auto face_data = reinterpret_cast<uint32_t*>(indices.data); this->fillFaceMatrix(face_data); } break; case mesh::MeshDataAccessCollection::INT: { auto face_data = reinterpret_cast<int*>(indices.data); this->fillFaceMatrix(face_data); } break; } switch (this->_mesh.attributes[_pos_attribute_idx].component_type) { case mesh::MeshDataAccessCollection::FLOAT: { auto vert_data = reinterpret_cast<float*>(this->_mesh.attributes[_pos_attribute_idx].data); this->fillVertexMatrix(vert_data); } break; case mesh::MeshDataAccessCollection::DOUBLE: { auto vert_data = reinterpret_cast<double*>(this->_mesh.attributes[_pos_attribute_idx].data); this->fillVertexMatrix(vert_data); } break; } // faces also as stl vector for quick find // std::vector<int> tmp_faces(_faces.data(), _faces.data() + _faces.rows() * _faces.cols()); _std_faces.resize(_faces.rows() * _faces.cols()); #pragma omp parallel for for (int i = 0; i < _faces.rows(); ++i) { for (int j = 0; j < _faces.cols(); ++j) { _std_faces[_faces.cols() * i + j] = _faces(i, j); } } // get normals if there are some if (this->_normal_attribute_idx != -1) { _normals.resize(this->_mesh.attributes[_normal_attribute_idx].byte_size / (this->_mesh.attributes[_normal_attribute_idx].component_cnt * mesh::MeshDataAccessCollection::getByteSize( this->_mesh.attributes[_normal_attribute_idx].component_type)), 
this->_mesh.attributes[_normal_attribute_idx].component_cnt); switch (this->_mesh.attributes[_normal_attribute_idx].component_type) { case mesh::MeshDataAccessCollection::FLOAT: { auto normal_data = reinterpret_cast<float*>(this->_mesh.attributes[_normal_attribute_idx].data); this->fillNormalMatrix(normal_data); } break; case mesh::MeshDataAccessCollection::DOUBLE: { auto normal_data = reinterpret_cast<double*>(this->_mesh.attributes[_normal_attribute_idx].data); this->fillNormalMatrix(normal_data); } break; } } return true; } float sign(const glm::vec2 p1, const glm::vec2 p2, const glm::vec2 p3) { return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y); } bool createOrthonormalBasis() { _orthonormalBasis.resize(_faces.rows()); #pragma omp parallel for for (int idx = 0; idx < _faces.rows(); ++idx) { // create new orthonormal basis glm::vec3 u; glm::vec3 tmp_v; for (int i = 0; i < 3; ++i) { u[i] = _vertices(_faces(idx, 1), i) - _vertices(_faces(idx, 0), i); tmp_v[i] = _vertices(_faces(idx, 2), i) - _vertices(_faces(idx, 0), i); } auto n = glm::cross(u, tmp_v); auto v = glm::cross(u, n); _orthonormalBasis[idx][0] = glm::normalize(u); _orthonormalBasis[idx][1] = glm::normalize(v); _orthonormalBasis[idx][2] = glm::normalize(n); } return true; } typedef MeshAdaptor<const mesh::MeshDataAccessCollection::VertexAttribute*> mesh_adaptor; typedef ::nanoflann::KDTreeSingleIndexAdaptor<::nanoflann::L2_Simple_Adaptor<float, mesh_adaptor, float>, mesh_adaptor, 3> NanoFlannIndex; std::vector<float> _mesh_vertices; std::vector<uint32_t> _mesh_faces; std::vector<mesh::MeshDataAccessCollection::VertexAttribute> _mesh_attribs; mesh::MeshDataAccessCollection::IndexData _mesh_indices; Eigen::MatrixXd _vertices; Eigen::MatrixXd _normals; Eigen::MatrixXi _faces; uint32_t _pos_attribute_idx; int _normal_attribute_idx = -1; std::vector<std::array<glm::vec3, 3>> _orthonormalBasis; std::vector<int> _std_faces; mesh::MeshDataAccessCollection::Mesh _mesh; std::shared_ptr<NanoFlannIndex> _kd_tree; const mesh::MeshDataAccessCollection::VertexAttribute* _va_ptr; }; } // namespace probe } // namespace megamol
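// Editor's sketch: a minimal call sequence for MeshUtility, assuming the
// caller already holds a populated mesh::MeshDataAccessCollection::Mesh with a
// POSITION attribute (the variable names here are illustrative only):
//
//     megamol::probe::MeshUtility util;
//     util.inputData(mesh);                  // builds Eigen matrices and per-face bases
//     const uint32_t n_faces = util.getNumTotalFaces();
//     for (uint32_t f = 0; f < n_faces; ++f) {
//         float area = util.calcTriangleArea(f);
//         auto pts = util.seedPoints(f, 16); // uniform samples via the sqrt-barycentric mapping
//     }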
DRB015-outofbounds-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The outermost loop is parallelized, but the inner loop has an out-of-bounds access to b[i][j-1] when j equals 0. This causes a read of the previous row's last element. For example, with a 4x4 array: j=0 1 2 3 i=0 x x x x 1 x x x x 2 x x x x 3 x x x x outer loop: i=2, inner loop: j=0: the accessed element b[i][j-1] becomes b[2][-1], which in turn is b[1][3] due to the linearized row-major storage of the 2-D array. This creates a loop-carried data dependence between i=2 and i=1. Data race pair: b[i][j]@80:7 vs. b[i][j-1]@80:15 */ #include <stdlib.h> int main(int argc, char* argv[]) { int i,j; int len=100; if (argc>1) len = atoi(argv[1]); int n=len, m=len; double b[n][m]; #pragma omp parallel for private(j) for (i=1;i<n;i++) for (j=0;j<m;j++) // Note: out-of-bounds access when j == 0 b[i][j]=b[i][j-1]; return 0; }
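/* Editor's sketch: the race-free counterpart of the loop nest above, for
   contrast with the benchmark (a drop-in replacement using the same i, j, n,
   m, b). Starting the inner loop at j = 1 removes the out-of-bounds read of
   b[i][-1] and with it the cross-row dependence, so parallelizing the outer
   loop is then safe: each row reads only its own elements. */
#pragma omp parallel for private(j)
for (i = 1; i < n; i++)
  for (j = 1; j < m; j++)      /* j starts at 1, so b[i][j-1] stays in row i */
    b[i][j] = b[i][j-1];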
clang-280954.c
#include <stdio.h> #include <assert.h> int main() { int a[1000]; #pragma omp target teams distribute for (int i = 0 ; i < 1000 ; i++) { a[i] = i; } for (int i = 0 ; i < 1000 ; i++) { assert( a[i] == i ); } printf("PASS\n"); return 0; }
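/* Editor's note: this is a minimal offload smoke test. The exact compile line
 * depends on the installed toolchain; a typical upstream-clang invocation
 * would look something like the following (the target triple is an
 * assumption, adjust for the available offload backend):
 *
 *     clang -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda clang-280954.c
 *
 * Built without -fopenmp-targets, the "target teams distribute" region falls
 * back to host execution and the assertions still verify a[i] == i. */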
GB_unop__minv_uint64_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint64_uint64) // op(A') function: GB (_unop_tran__minv_uint64_uint64) // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CAST(z, aij) \ uint64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 64) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint64_uint64) ( uint64_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 64) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; uint64_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 64) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
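/* Editor's note: usage sketch for the kernel above. GrB_MINV_UINT64 is the
 * standard built-in multiplicative-inverse operator; for unsigned integers
 * 1/x is 1 when x == 1 and 0 when x > 1, and x == 0 is handled by the
 * GB_IMINV_UNSIGNED macro's integer-division convention rather than a trap.
 *
 *     GrB_Matrix A, C ;   // both GrB_UINT64, same dimensions
 *     GrB_Matrix_apply (C, NULL, NULL, GrB_MINV_UINT64, A, NULL) ;
 */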
GB_unop__minv_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint32_uint32) // op(A') function: GB (_unop_tran__minv_uint32_uint32) // C type: uint32_t // A type: uint32_t // cast: uint32_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CAST(z, aij) \ uint32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 32) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint32_uint32) ( uint32_t *Cx, // Cx and Ax may be aliased const uint32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint32_uint32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
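/* Editor's sketch (plain C, op-agnostic, not GraphBLAS itself): the dense and
   bitmap loops in these generated kernels share one shape. In this minimal
   version, Ab == NULL means all anz slots hold entries; otherwise Ab[p] == 0
   marks slot p as absent and it must be skipped rather than read. */
#include <stdint.h>

static void apply_unop_u32(uint32_t *Cx, const uint32_t *Ax, const int8_t *Ab,
                           int64_t anz, uint32_t (*op)(uint32_t)) {
    for (int64_t p = 0; p < anz; p++) {
        if (Ab != NULL && !Ab[p]) continue;   /* bitmap case: skip holes */
        Cx[p] = op(Ax[p]);                    /* dense case applies op to every slot */
    }
}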
residualbased_newton_raphson_mpc_contact_strategy.h
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Contact criteria #include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "utilities/atomic_utilities.h" // // Processes // #include "processes/fast_transfer_between_model_parts_process.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonMPCContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonMPCContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedNewtonRaphsonMPCContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef MPCContactCriteria<TSparseSpace, TDenseSpace> TMPCContactCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType 
ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType; typedef std::size_t IndexType; typedef std::size_t SizeType; /** * @brief Default constructor */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy() { } /** * @brief Default constructor. (with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the 
problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. */ ~ResidualBasedNewtonRaphsonMPCContactStrategy() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename StrategyBaseType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the * values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY BaseType::Predict(); // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // We get the system TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; // We solve the system in order to check the active set once TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); typename TSchemeType::Pointer p_scheme = BaseType::GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver(); p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); // Check active set const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel(); BaseType::mpConvergenceCriteria->SetEchoLevel(0); mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; // Computing nodal weights ComputeNodalWeights(); BaseType::Initialize(); KRATOS_CATCH(""); } /** * @brief The problem of interest is solved. * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), * SolveSolutionStep() and FinalizeSolutionStep(). * All those functions can otherwise be called separately. 
*/ double Solve() override { this->Initialize(); this->InitializeSolutionStep(); this->Predict(); this->SolveSolutionStep(); this->FinalizeSolutionStep(); // TODO: Comment for proper work of interaction return 0.0; } /** * @brief Performs all the required operations that should be done (for each step) * before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { // Computing nodal weights ComputeNodalWeights(); BaseType::InitializeSolutionStep(); // // If enforcing NTN // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if (enforce_ntn) { // EnforcingNTN(); // } } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. */ void FinalizeSolutionStep() override { KRATOS_TRY; BaseType::FinalizeSolutionStep(); KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); if (r_process_info.Is(INTERACTION)) { // We get the system TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; int inner_iteration = 0; const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel(); while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) { KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; is_converged = AuxiliarSolveSolutionStep(); // We check the convergence if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) { if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl; else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl; } } } else { is_converged = AuxiliarSolveSolutionStep(); } return is_converged; KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. 
(auxiliar method) */ bool AuxiliarSolveSolutionStep() { // Getting flag INTERACTION ModelPart& r_model_part = StrategyBaseType::GetModelPart(); const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool(); VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions()); // Pointers needed in the solution typename TSchemeType::Pointer p_scheme = this->GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; // Initializing the parameters of the Newton-Raphson cycle unsigned int iteration_number = 1; r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; // Computing nodal weights ComputeNodalWeights(); p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); // // If enforcing NTN // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if (enforce_ntn) { // EnforcingNTN(); // } // Function to perform the building and the solving phase. if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); } // Iteration Cycle... 
performed only for NonLinearProblems while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) { // Setting the number of iteration r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; // Computing nodal weights ComputeNodalWeights(); // Calling InitializeNonLinIteration p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); // Shaping correctly the system if (update_each_nl_iteration) { p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part); p_builder_and_solver->SetUpSystem(r_model_part); p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part); } is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); // Call the linear system solver to find the correction mDx for the it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) { if (!BaseType::GetKeepSystemConstantDuringIterations()) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl; } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); } } // Plots a warning if the maximum number of iterations is exceeded if (iteration_number >= BaseType::mMaxIterationNumber) { BaseType::MaxIterationsExceeded(); } else { KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl; } // Recalculate residual if needed (note that some convergence criteria need it to be recalculated) if (!residual_is_updated) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. 
// TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "newton_raphson_mpc_contact_strategy", "inner_loop_iterations" : 5, "update_each_nl_iteration" : false, "enforce_ntn" : false })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "newton_raphson_mpc_contact_strategy"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); // Copy the parameters mThisParameters = ThisParameters; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ /** * Copy constructor. 
*/ ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ // /** // * @brief This inforces NTN formulation // */ // void EnforcingNTN() // { // // List of enforced nodes to not repeat // std::unordered_set<IndexType> enforced_nodes; // // // Getting contact model part // ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart(); // ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact"); // // // The process info // const auto& r_process_info = r_root_model_part.GetProcessInfo(); // // // Reset the pointers of the conditions // for (auto& r_cond : r_computing_contact_model_part.Conditions()) { // if (r_cond.Has(CONSTRAINT_POINTER)) { // r_cond.SetValue(CONSTRAINT_POINTER, nullptr); // } // } // // // Iterate over the constraints // IndexType counter = 1; // for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) { // r_const.SetId(counter); // ++counter; // } // // // Auxiliar classes // Matrix original_relation_matrix, relation_matrix; // Vector original_constant_vector, constant_vector; // ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs; // // // Iterate over the constraints // for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) { // // Getting original system // r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info); // r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info); // // // TODO: Finish rebuild // // // Creating new constraint // r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, master_dofs, slave_dofs, relation_matrix, constant_vector); // // // Setting to remove the old constraints // r_const.Set(TO_ERASE, true); // // ++counter; // } // // // Remove old constraints // r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE); // // // Transfer constraints from the root to the computing model part // FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute(); // // // Reorder ids // counter = 1; // for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) { // r_const.SetId(counter); // ++counter; // } // } /** * @brief This computes the nodal weights */ void ComputeNodalWeights() { // Getting contact model part ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact"); // Reset the NODAL_PAUX and NODAL_MAUX auto& r_nodes_array = r_contact_model_part.Nodes(); VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array); VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array); // We set the constraints active and inactive in function of the active set auto& r_conditions_array = r_contact_model_part.Conditions(); auto it_cond_begin = r_conditions_array.begin(); // If enforcing NTN const bool enforce_ntn = false; // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if (enforce_ntn) { // VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array); // } #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) { auto it_cond = it_cond_begin + i; // 
Only slave conditions if (it_cond->Is(SLAVE)) { auto& r_geometry = it_cond->GetGeometry(); Vector lumping_factor; lumping_factor = r_geometry.LumpingFactors(lumping_factor); const double domain_size = r_geometry.DomainSize(); for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) { auto& r_node = r_geometry[i_node]; if (!enforce_ntn) { AtomicAdd(r_node.GetValue(NODAL_PAUX), 1.0); } AtomicAdd(r_node.GetValue(NODAL_MAUX), lumping_factor[i_node] * domain_size); } } } } ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
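// Minimal usage sketch (illustrative only; the space/solver aliases below are
// assumptions, not part of this header -- only the settings keys come from
// GetDefaultParameters() above):
//
//   using SparseSpaceType  = UblasSpace<double, CompressedMatrix, Vector>;
//   using LocalSpaceType   = UblasSpace<double, Matrix, Vector>;
//   using LinearSolverType = LinearSolver<SparseSpaceType, LocalSpaceType>;
//   using ContactStrategyType = ResidualBasedNewtonRaphsonMPCContactStrategy<
//       SparseSpaceType, LocalSpaceType, LinearSolverType>;
//
//   Parameters settings(R"({
//       "inner_loop_iterations"    : 5,
//       "update_each_nl_iteration" : false,
//       "enforce_ntn"              : false
//   })");
//   auto p_strategy = Kratos::make_shared<ContactStrategyType>(r_model_part, settings);
//   p_strategy->Initialize();
//   p_strategy->Solve();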
gemm.c
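/*
 * gemm.c - CPU GEMM kernels: float32 paths (scalar and AVX-tiled) plus
 * XNOR-net binary paths. In the binary kernels each output element is
 * recovered from a bit-count: over K packed bits, every matching bit pair
 * contributes +1 and every mismatch -1, hence the expression used
 * throughout the *_bin_* kernels below:
 *     C[i][j] = (2 * popcount(xnor(a_bits_i, b_bits_j)) - K) * mean_arr[i]
 */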
#include "gemm.h" #include "utils.h" #include "im2col.h" #include "dark_cuda.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <float.h> #include <string.h> #include <stdint.h> #ifdef _WIN32 #include <intrin.h> #endif #if defined(_OPENMP) #include <omp.h> #endif #define TILE_M 4 // 4 ops #define TILE_N 16 // AVX2 = 2 ops * 8 floats #define TILE_K 16 // loop #ifdef __cplusplus #define PUT_IN_REGISTER #else #define PUT_IN_REGISTER register #endif void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float* m = (float*)calloc(rows * cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } //-------------------------------------------- // XNOR bitwise GEMM for binary neural network //-------------------------------------------- static inline unsigned char xnor(unsigned char a, unsigned char b) { //return a == b; return !(a^b); } // INT-32 static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) { size_t src_i = index / 32; int src_shift = index % 32; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } static inline uint32_t fill_bit_int32(char src) { if (src == 0) return 0x00000000; else return 0xFFFFFFFF; } static inline uint64_t fill_bit_int64(char src) { if (src == 0) return 0x0000000000000000; else return 0xFFFFFFFFFFFFFFFF; } void binary_int32_printf(uint32_t src) { int i; for (i = 0; i < 32; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void binary_int64_printf(uint64_t src) { int i; for (i = 0; i < 64; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i, j, k; for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k*ldb + j); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] 
- K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i, j, k; for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); char b_bit = get_bit(B, j*ldb + k); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] const char a_bit = get_bit(A, i*lda + k); uint64_t a_bit64 = fill_bit_int64(a_bit); int k_ldb = k*ldb; for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056] if ((N - j > 64) && (k_ldb % 8 == 0)) { uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); //printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc printf("\n %d \n", __popcnt64(c_bit64)); // msvs int h; for (h = 0; h < 64; ++h) if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1; //binary_int64_printf(a_bit64); //binary_int64_printf(b_bit64); //binary_int64_printf(c_bit64); } else { for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k_ldb + j); if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1; } } } } } if (mean_arr) { //int K_2 = K / 2; for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; //float mean_val2 = 2 * mean_val; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; //C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2; } } } else { for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { C[i*ldc + j] = count_arr[i*ldc + j] - K / 2; } } } free(count_arr); //getchar(); } */ /* void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); #ifdef WIN32 int tmp_count = __popcnt64(c_bit64); #else int tmp_count = __builtin_popcountll(c_bit64); #endif if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } C[i*ldc + j] = (2 * count - K) * mean_val; } } } */ //---------------------------- // is not used void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb) { unsigned int x, y; for (y = 0; y < 32; ++y) { 
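// For each set bit x of input row A[y * lda], set bit y in output row
// B[x * ldb]. This reference version only ORs bits in, so B must be
// zero-initialized by the caller; the SWAR/diagonal routines below replace
// this per-bit loop.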
for (x = 0; x < 32; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= (uint32_t)1 << y; } } } #ifndef GPU uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } uint32_t reverse_32_bit(uint32_t a) { // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return (reverse_8_bit(a >> 24) << 0) | (reverse_8_bit(a >> 16) << 8) | (reverse_8_bit(a >> 8) << 16) | (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { unsigned A_tmp[32]; int i; #pragma unroll for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb) { unsigned x, y; for (y = 0; y < 8; ++y) { for (x = 0; x < 8; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y; } } } unsigned char reverse_byte_1(char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } unsigned char reverse_byte(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } static unsigned char lookup[16] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, }; unsigned char reverse_byte_3(unsigned char n) { // Reverse the top and bottom nibble then swap them. return (lookup[n & 0b1111] << 4) | lookup[n >> 4]; } void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n) { unsigned x, y, t; x = y = 0; // Load the array and pack it into x and y. 
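// Fix (assuming the upstream darknet version of this routine): in this copy
// the two packing loads were left commented out below, so x and y stayed 0
// and the transpose emitted only zero bytes. Restore the loads:
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];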
//x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; //y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x); B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y); } /* // transpose by 8-bit void transpose_bin(char *A, char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda); int i; #pragma omp parallel for for (i = 0; i < n; i += 8) { int j; for (j = 0; j < m; j += 8) { int a_index = i*lda + j; int b_index = j*ldb + i; //transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8); transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } */ #endif // transpose by 32-bit void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32); //printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32); int i; #pragma omp parallel for for (i = 0; i < n; i += 32) { int j; for (j = 0; j < m; j += 32) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32); } for (; j < m; ++j) { if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i); } } } static inline int popcnt_32(uint32_t val32) { #ifdef WIN32 // Windows MSVS int tmp_count = __popcnt(val32); #else // Linux GCC int tmp_count = __builtin_popcount(val32); #endif return tmp_count; } //---------------------------- #if (defined(__AVX__) && defined(__x86_64__)) || defined(_WIN64) #ifdef _WIN64 #include <intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #if defined(_MSC_VER) && _MSC_VER <= 1900 static inline __int32 _mm256_extract_epi64(__m256i a, const int index) { return a.m256i_i64[index]; } static inline __int32 _mm256_extract_epi32(__m256i a, const int index) { return a.m256i_i32[index]; } #endif static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return a.m256_f32[index]; } #else // Linux GCC/Clang #include <x86intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #include <cpuid.h> static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index)); } void asm_cpuid(uint32_t* abcd, uint32_t eax) { uint32_t ebx = 0, edx = 0, ecx = 0; // EBX is saved to EDI and later restored __asm__("movl %%ebx, %%edi;" "cpuid;" "xchgl %%ebx, %%edi;" : "=D"(ebx), "+a"(eax), "+c"(ecx), "=d"(edx)); abcd[0] = eax; 
abcd[1] = ebx; abcd[2] = ecx; abcd[3] = edx; } #endif #ifdef _WIN32 // Windows #define cpuid(info, x) __cpuidex(info, x, 0) #else // GCC Intrinsics void cpuid(int info[4], int InfoType) { __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]); } #endif // Misc. static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1; static int HW_ABM; // Advanced Bit Manipulation // SIMD: 128-bit static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA; // SIMD: 256-bit static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2; // SIMD: 512-bit static int HW_AVX512F; // AVX512 Foundation static int HW_AVX512CD; // AVX512 Conflict Detection static int HW_AVX512PF; // AVX512 Prefetch static int HW_AVX512ER; // AVX512 Exponential + Reciprocal static int HW_AVX512VL; // AVX512 Vector Length Extensions static int HW_AVX512BW; // AVX512 Byte + Word static int HW_AVX512DQ; // AVX512 Doubleword + Quadword static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions // https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set void check_cpu_features(void) { int info[4]; cpuid(info, 0); int nIds = info[0]; cpuid(info, 0x80000000); unsigned nExIds = info[0]; // Detect Features if (nIds >= 0x00000001) { cpuid(info, 0x00000001); HW_MMX = (info[3] & ((int)1 << 23)) != 0; HW_SSE = (info[3] & ((int)1 << 25)) != 0; HW_SSE2 = (info[3] & ((int)1 << 26)) != 0; HW_SSE3 = (info[2] & ((int)1 << 0)) != 0; HW_SSSE3 = (info[2] & ((int)1 << 9)) != 0; HW_SSE41 = (info[2] & ((int)1 << 19)) != 0; HW_SSE42 = (info[2] & ((int)1 << 20)) != 0; HW_AES = (info[2] & ((int)1 << 25)) != 0; HW_AVX = (info[2] & ((int)1 << 28)) != 0; HW_FMA3 = (info[2] & ((int)1 << 12)) != 0; HW_RDRAND = (info[2] & ((int)1 << 30)) != 0; } if (nIds >= 0x00000007) { cpuid(info, 0x00000007); HW_AVX2 = (info[1] & ((int)1 << 5)) != 0; HW_BMI1 = (info[1] & ((int)1 << 3)) != 0; HW_BMI2 = (info[1] & ((int)1 << 8)) != 0; HW_ADX = (info[1] & ((int)1 << 19)) != 0; HW_SHA = (info[1] & ((int)1 << 29)) != 0; HW_PREFETCHWT1 = (info[2] & ((int)1 << 0)) != 0; HW_AVX512F = (info[1] & ((int)1 << 16)) != 0; HW_AVX512CD = (info[1] & ((int)1 << 28)) != 0; HW_AVX512PF = (info[1] & ((int)1 << 26)) != 0; HW_AVX512ER = (info[1] & ((int)1 << 27)) != 0; HW_AVX512VL = (info[1] & ((int)1 << 31)) != 0; HW_AVX512BW = (info[1] & ((int)1 << 30)) != 0; HW_AVX512DQ = (info[1] & ((int)1 << 17)) != 0; HW_AVX512IFMA = (info[1] & ((int)1 << 21)) != 0; HW_AVX512VBMI = (info[2] & ((int)1 << 1)) != 0; } if (nExIds >= 0x80000001) { cpuid(info, 0x80000001); HW_x64 = (info[3] & ((int)1 << 29)) != 0; HW_ABM = (info[2] & ((int)1 << 5)) != 0; HW_SSE4a = (info[2] & ((int)1 << 6)) != 0; HW_FMA4 = (info[2] & ((int)1 << 16)) != 0; HW_XOP = (info[2] & ((int)1 << 11)) != 0; } } int is_avx() { static int result = -1; if (result == -1) { check_cpu_features(); result = HW_AVX; if (result == 1) printf(" Used AVX \n"); else printf(" Not used AVX \n"); } return result; } int is_fma_avx2() { static int result = -1; if (result == -1) { check_cpu_features(); result = HW_FMA3 && HW_AVX2; if (result == 1) printf(" Used FMA & AVX2 \n"); else printf(" Not used FMA & AVX2 \n"); } return result; } // https://software.intel.com/sites/landingpage/IntrinsicsGuide void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; if (is_avx() == 1) { // AVX for (i = 0; i < M; ++i) { for (k = 0; k < K; 
++k) { float A_PART = ALPHA*A[i*lda + k]; __m256 a256, b256, c256, result256; // AVX a256 = _mm256_set1_ps(A_PART); for (j = 0; j < N - 8; j += 8) { b256 = _mm256_loadu_ps(&B[k*ldb + j]); c256 = _mm256_loadu_ps(&C[i*ldc + j]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //result256 = _mm256_fmadd_ps(a256, b256, c256); result256 = _mm256_mul_ps(a256, b256); result256 = _mm256_add_ps(result256, c256); _mm256_storeu_ps(&C[i*ldc + j], result256); } int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8; for (j = prev_end; j < N; ++j) C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } else { for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } /* // SSE __m128 a128, b128, c128, result128; // SSE a128 = _mm_set1_ps(A_PART); for (j = 0; j < N - 4; j += 4) { b128 = _mm_loadu_ps(&B[k*ldb + j]); c128 = _mm_loadu_ps(&C[i*ldc + j]); //result128 = _mm_fmadd_ps(a128, b128, c128); result128 = _mm_mul_ps(a128, b128); result128 = _mm_add_ps(result128, c128); _mm_storeu_ps(&C[i*ldc + j], result128); } int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4; for (j = prev_end; j < N; ++j){ C[i*ldc + j] += A_PART*B[k*ldb + j]; } */ } } } } void gemm_nn_fast(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i; #pragma omp parallel for for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M) { int j, k; int i_d, k_d; for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K) { for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N) { // L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB // L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB // L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB __m256 result256; __m256 a256_0, b256_0; // AVX __m256 a256_1, b256_1; // AVX __m256 a256_2;// , b256_2; // AVX __m256 a256_3;// , b256_3; // AVX __m256 c256_0, c256_1, c256_2, c256_3; __m256 c256_4, c256_5, c256_6, c256_7; c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]); c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]); c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]); c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]); c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]); c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]); c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]); c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]); for (k_d = 0; k_d < (TILE_K); ++k_d) { a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]); a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]); a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]); a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]); b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]); b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0); //c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1); //c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2); //c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3); //c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4); //c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5); //c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6); //c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7); result256 = _mm256_mul_ps(a256_0, b256_0); c256_0 = _mm256_add_ps(result256, c256_0); result256 = _mm256_mul_ps(a256_1, b256_0); c256_1 = _mm256_add_ps(result256, c256_1); result256 = _mm256_mul_ps(a256_0, b256_1); c256_2 = _mm256_add_ps(result256, c256_2); result256 
= _mm256_mul_ps(a256_1, b256_1); c256_3 = _mm256_add_ps(result256, c256_3); result256 = _mm256_mul_ps(a256_2, b256_0); c256_4 = _mm256_add_ps(result256, c256_4); result256 = _mm256_mul_ps(a256_3, b256_0); c256_5 = _mm256_add_ps(result256, c256_5); result256 = _mm256_mul_ps(a256_2, b256_1); c256_6 = _mm256_add_ps(result256, c256_6); result256 = _mm256_mul_ps(a256_3, b256_1); c256_7 = _mm256_add_ps(result256, c256_7); } _mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0); _mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1); _mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2); _mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3); _mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4); _mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5); _mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6); _mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7); } for (j = (N / TILE_N)*TILE_N; j < N; ++j) { for (i_d = i; i_d < (i + TILE_M); ++i_d) { for (k_d = k; k_d < (k + TILE_K); ++k_d) { PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d]; C[i_d*ldc + j] += A_PART*B[k_d*ldb + j]; } } } } for (k = (K / TILE_K)*TILE_K; k < K; ++k) { for (i_d = i; i_d < (i + TILE_M); ++i_d) { PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k]; for (j = 0; j < N; ++j) { C[i_d*ldc + j] += A_PART*B[k*ldb + j]; } } } } for (i = (M / TILE_M)*TILE_M; i < M; ++i) { int j, k; for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]); for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s]; __m256i a256 = _mm256_set1_epi32(A_PART); for (j = 0; j < N - 8; j += 8) { __m256i b256 = *((__m256i*)&B[s*ldb + j]); __m256i xor256 = _mm256_xor_si256(a256, b256); // xnor = xor(a,b) __m256i all_1 = _mm256_set1_epi8((char)255); __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b)) // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a) __m256 count = _mm256_setr_ps( popcnt_32(_mm256_extract_epi32(xnor256, 0)), popcnt_32(_mm256_extract_epi32(xnor256, 1)), popcnt_32(_mm256_extract_epi32(xnor256, 2)), popcnt_32(_mm256_extract_epi32(xnor256, 3)), popcnt_32(_mm256_extract_epi32(xnor256, 4)), popcnt_32(_mm256_extract_epi32(xnor256, 5)), popcnt_32(_mm256_extract_epi32(xnor256, 6)), popcnt_32(_mm256_extract_epi32(xnor256, 7))); __m256 val2 = _mm256_set1_ps(2); count = _mm256_mul_ps(count, val2); // count * 2 __m256 val32 = _mm256_set1_ps(32); count = _mm256_sub_ps(count, val32); // count - 32 __m256 mean256 = _mm256_set1_ps(mean_val); count = _mm256_mul_ps(count, mean256); // count * mean_val __m256 c256 = *((__m256*)&C[i*ldc + j]); count = _mm256_add_ps(count, c256); // c = c + count *((__m256*)&C[i*ldc + j]) = count; } for (; j < N; ++j) // out_h*out_w; { PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; } } } } void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride, float *weights, float *input, float *output) { //const int out_h = (h + 2 * pad - ksize) / stride + 1; // 
output_height=input_height for stride=1 and pad=1 //const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1 int fil; // filter index #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP for (fil = 0; fil < n; ++fil) { //int i, f, j; int chan, y, x, f_y, f_x; // channel index for (chan = 0; chan < c; ++chan) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize; int const input_pre_index = chan*w*h; float sum = 0; // filter - y for (f_y = 0; f_y < ksize; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < ksize; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; int input_index = input_pre_index + input_y*w + input_x; int weights_index = weights_pre_index + f_y*ksize + f_x; sum += input[input_index] * weights[weights_index]; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride, float *weights, float *input, float *output, float *mean) { //const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1 //const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1 int i; #if defined(_OPENMP) static int max_num_threads = 0; if (max_num_threads == 0) { max_num_threads = omp_get_max_threads(); //omp_set_num_threads( max_num_threads / 2); } #endif //convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output); __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); for (i = 0; i < ksize*ksize*n*c; i+=8) { *((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1)); } //for (i = 0; i < w*h*c; i += 8) { //*((__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1)); //} //__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF); //all256_last_zero.m256i_i32[7] = 0; __m256i all256_last_zero = _mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0); __m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1); //__m256 all256_sing1 = _mm256_set1_ps(0x80000000); __m256 all256_one = _mm256_set1_ps(1); __m256i all256i_one = _mm256_set1_epi32(1); ///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i])); ///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats int fil; // filter index #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP for (fil = 0; fil < n; ++fil) { int chan, y, x, f_y, f_x; float cur_mean = fabs(mean[fil]); __m256 mean256 = _mm256_set1_ps(cur_mean); // channel index //for (chan = 0; chan < c; ++chan) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w-8; x+=8) { int const output_index = fil*w*h + y*w + x; float sum = 0; __m256 sum256 = _mm256_set1_ps(0); for (chan = 0; chan < c; ++chan) { int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize; int const input_pre_index = chan*w*h; // filter - y for (f_y = 0; f_y < ksize; ++f_y) { int input_y = y + f_y - pad; 
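// Note: the weights were masked above (all256_sing1) so each holds only its
// sign bit; the XOR below therefore flips the input's sign exactly when the
// weight is negative, accumulating sum += sign(w) * x. The per-filter scale
// cur_mean = fabs(mean[fil]) is applied once after the chan/f_y/f_x loops.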
//__m256 in = *((__m256*)&input[input_pre_index + input_y*w]); if (input_y < 0 || input_y >= h) continue; //__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]); // filter - x for (f_x = 0; f_x < ksize; ++f_x) { int input_x = x + f_x - pad; //if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; int input_index = input_pre_index + input_y*w + input_x; int weights_index = weights_pre_index + f_y*ksize + f_x; //if (input_y < 0 || input_y >= h) continue; //sum += input[input_index] * weights[weights_index]; __m256 in = *((__m256*)&input[input_index]); __m256 w = _mm256_set1_ps(weights[weights_index]); //__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats __m256 xor256 = _mm256_xor_ps(w, in); //printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]); //printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]); //__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256); //sum256 = xor256; sum256 = _mm256_add_ps(xor256, sum256); //printf("\n --- \n"); //printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]); if (f_x < ksize-1) { //in = _mm256_permutevar8x32_ps(in, idx256); //in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero)); } } } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; //output[output_index] += sum; sum256 = _mm256_mul_ps(sum256, mean256); //printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n", // cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]); //__m256 out = *((__m256*)&output[output_index]); //out = _mm256_add_ps(out, sum256); //*((__m256*)&output[output_index]) = out; *((__m256*)&output[output_index]) = sum256; //_mm256_storeu_ps(&C[i*ldc + j], result256); } } } // http://graphics.stanford.edu/~seander/bithacks.html // https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register // https://arxiv.org/pdf/1611.07612.pdf static inline int popcnt128(__m128i n) { const __m128i n_hi = _mm_unpackhi_epi64(n, n); #if defined(_MSC_VER) return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi)); #elif defined(__APPLE__) && defined(__clang__) return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi)); #else return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi)); #endif } static inline int popcnt256(__m256i n) { return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1)); } static inline __m256i count256(__m256i v) { __m256i lookup = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4); __m256i low_mask = _mm256_set1_epi8(0x0f); __m256i lo = _mm256_and_si256(v, low_mask); __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask); __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo); __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi); __m256i total = _mm256_add_epi8(popcnt1, popcnt2); return _mm256_sad_epu8(total, _mm256_setzero_si256()); } static inline int popcnt256_custom(__m256i n) { __m256i val = count256(n); //return val.m256i_i64[0] + //val.m256i_i64[1] + //val.m256i_i64[2] + //val.m256i_i64[3]; return 
_mm256_extract_epi64(val, 0) + _mm256_extract_epi64(val, 1) + _mm256_extract_epi64(val, 2) + _mm256_extract_epi64(val, 3); } static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) { __m256i c_bit256 = _mm256_set1_epi8((char)255); __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256); // xnor = not(xor(a,b)) c_bit256 = _mm256_andnot_si256(xor256, c_bit256); // can be optimized - we can do other NOT for wegihts once and do not do this NOT *count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum); // 1st part - popcnt Mula's algorithm } // 2nd part - popcnt Mula's algorithm static inline int get_count_mula(__m256i count_sum) { return _mm256_extract_epi64(count_sum, 0) + _mm256_extract_epi64(count_sum, 1) + _mm256_extract_epi64(count_sum, 2) + _mm256_extract_epi64(count_sum, 3); } // 5x times faster than gemm()-float32 // further optimizations: do mean-mult only for the last layer void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #if defined(_OPENMP) static int max_num_threads = 0; if (max_num_threads == 0) { max_num_threads = omp_get_max_threads(); //omp_set_num_threads(max_num_threads / 2); } #endif //#pragma omp parallel for //for (i = 0; i < M; ++i) #pragma omp parallel for for (i = 0; i < (M/2)*2; i += 2) { // l.n - filters [16 - 55 - 1024] float mean_val_0 = mean_arr[i + 0]; float mean_val_1 = mean_arr[i + 1]; int j, k; //__m256i all_1 = _mm256_set1_epi8(255); //for (j = 0; j < N; ++j) for (j = 0; j < (N/2)*2; j += 2) { // out_h*out_w - one channel output size [169 - 173056] //int count = 0; const int bit_step = 256; __m256i count_sum_0 = _mm256_set1_epi8(0); __m256i count_sum_1 = _mm256_set1_epi8(0); __m256i count_sum_2 = _mm256_set1_epi8(0); __m256i count_sum_3 = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); __m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8)); __m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0); xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1); xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2); xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3); //count += popcnt256(c_bit256); //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } int count_0 = get_count_mula(count_sum_0); int count_1 = get_count_mula(count_sum_1); int count_2 = get_count_mula(count_sum_2); int count_3 = get_count_mula(count_sum_3); const int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count_0 = count_0 - f1; // remove extra bits (from empty space for align only) count_1 = count_1 - f1; count_2 = count_2 - f1; count_3 = count_3 - f1; C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0; C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0; C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1; C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1; } int i_d; for (i_d = 0; i_d < 2; ++i_d) { float mean_val = mean_arr[i + i_d]; for (j = (N / 2) * 2; j < N; j += 1) { // out_h*out_w - one channel output size [169 - 173056] const int bit_step = 256; __m256i count_sum = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum); } int count = get_count_mula(count_sum); const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val; } } } for (i = (M / 2) * 2; i < M; i += 1) { float mean_val = mean_arr[i]; int j, k; for (j = 0; j < N; j += 1) { // out_h*out_w - one channel output size [169 - 173056] const int bit_step = 256; __m256i count_sum = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum); } int count = get_count_mula(count_sum); const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val; } } } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_transpose(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int ldb_align) { const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; int c; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 4; w+=8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0]; data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1]; data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2]; data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3]; data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4]; data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5]; data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6]; data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);// src256.m256_f32[7]; //_mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row 
= h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2()) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col-pad; ++h) { for (w = pad; w < width_col-pad-8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); _mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col-1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col-1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { //printf("\n Error: is no non-optimized version \n"); im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_align(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2()) { int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); _mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2()) { __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.00); int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8]; *dst_ptr |= (mask << (col_index % 8)); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, 
pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i = 0; if (a == LINEAR) {} else if (a == LEAKY) { if (is_fma_avx2()) { __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 all256_01 = _mm256_set1_ps(0.1F); for (i = 0; i < n - 8; i += 8) { //x[i] = (x[i]>0) ? x[i] : .1*x[i]; __m256 src256 = _mm256_loadu_ps(&x[i]); __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1 __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult; _mm256_storeu_ps(&x[i], result256); } } for (; i < n; ++i) { x[i] = (x[i]>0) ? x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.0); for (i = 0; i < size; i+=8) { //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&src[i])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 dst[i / 8] = mask; } } static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb) { __m128 row1 = _mm_loadu_ps(&A[0 * lda]); __m128 row2 = _mm_loadu_ps(&A[1 * lda]); __m128 row3 = _mm_loadu_ps(&A[2 * lda]); __m128 row4 = _mm_loadu_ps(&A[3 * lda]); _MM_TRANSPOSE4_PS(row1, row2, row3, row4); _mm_storeu_ps(&B[0 * ldb], row1); _mm_storeu_ps(&B[1 * ldb], row2); _mm_storeu_ps(&B[2 * ldb], row3); _mm_storeu_ps(&B[3 * ldb], row4); } void transpose_block_SSE4x4(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += block_size) { int j, i2, j2; //int max_i2 = (i + block_size < n) ? (i + block_size) : n; if (i + block_size < n) { int max_i2 = i + block_size; for (j = 0; j < m; j += block_size) { //int max_j2 = (j + block_size < m) ? 
(j + block_size) : m; if (j + block_size < m) { int max_j2 = j + block_size; for (i2 = i; i2 < max_i2; i2 += 4) { for (j2 = j; j2 < max_j2; j2 += 4) { transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb); } } } else { for (i2 = i; i2 < max_i2; ++i2) { for (j2 = j; j2 < m; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } else { for (i2 = i; i2 < n; ++i2) { for (j2 = 0; j2 < m; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { const int w_offset = -pad / 2; const int h_offset = -pad / 2; int b, k; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { //for (j = 0; j < out_w; ++j) { j = 0; if(stride == 1 && is_avx() == 1) { for (j = 0; j < out_w - 8 - (size - 1); j += 8) { int out_index = j + out_w*(i + out_h*(k + c*b)); __m256 max256 = _mm256_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); max256 = _mm256_max_ps(src256, max256); } } _mm256_storeu_ps(&dst[out_index], max256); } } else if (size == 2 && stride == 2 && is_avx() == 1) { for (j = 0; j < out_w - 4; j += 4) { int out_index = j + out_w*(i + out_h*(k + c*b)); //float max = -FLT_MAX; //int max_i = -1; __m128 max128 = _mm_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { //for (m = 0; m < size; ++m) m = 0; { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4)); __m256 max256 = _mm256_max_ps(src256, src256_2); __m128 src128_0 = _mm256_extractf128_ps(max256, 0); __m128 src128_1 = _mm256_extractf128_ps(max256, 1); __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6)); max128 = _mm_max_ps(src128, max128); } } _mm_storeu_ps(&dst[out_index], max128); } } for (; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); float val = (valid != 0) ? src[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? 
val : max; } } dst[out_index] = max; indexes[out_index] = max_i; } } } } } #else // AVX int is_avx() { return 0; } int is_fma_avx2() { return 0; } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_fast(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; #pragma omp parallel for for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]); for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { //PUT_IN_REGISTER float A_PART = 1*a[i*k + s]; PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s]; for (j = 0; j < N; ++j) // out_h*out_w; { //c[i*n + j] += A_PART*b[s*n + j]; PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); //printf(" xnor_result = %d, ", xnor_result); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; //c[i*n + j] += count*mean; } } } } void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride, float *weights, float *input, float *output, float *mean) { const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1 const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1 //int i, f, j; int fil; // filter index #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP for (fil = 0; fil < n; ++fil) { int chan, y, x, f_y, f_x; // channel index for (chan = 0; chan < c; ++chan) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize; int const input_pre_index = chan*w*h; float sum = 0; // filter - y for (f_y = 0; f_y < ksize; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < ksize; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; int input_index = input_pre_index + input_y*w + input_x; int weights_index = weights_pre_index + f_y*ksize + f_x; sum += input[input_index] * weights[weights_index]; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } static inline int popcnt_64(uint64_t val64) { #ifdef WIN32 // Windows #ifdef _WIN64 // Windows 64-bit int tmp_count = __popcnt64(val64); #else // Windows 32-bit int tmp_count = __popcnt(val64); tmp_count += __popcnt(val64 >> 32); #endif #else // Linux #if defined(__x86_64__) || defined(__aarch64__) // Linux 64-bit int tmp_count = __builtin_popcountll(val64); #else // Linux 32-bit int tmp_count = __builtin_popcount(val64); tmp_count += __builtin_popcount(val64 >> 32); #endif #endif return tmp_count; } void 
gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = popcnt_64(c_bit64); if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } C[i*ldc + j] = (2 * count - K) * mean_val; } } } void im2col_cpu_custom_transpose(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int ldb_align) { printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n"); } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); return; int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { //printf("\n Error: is no non-optimized version \n"); im2col_cpu(data_im, channels, height, width, ksize, stride, 
pad, data_col); } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 1) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char*)data_col, col_index); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after 
float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i; if (a == LINEAR) { } else if (a == LEAKY) { for (i = 0; i < n; ++i) { x[i] = (x[i]>0) ? x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; char* byte_arr = (char*)calloc(size, sizeof(char)); for (i = 0; i < size; ++i) { if (src[i] > 0) byte_arr[i] = 1; } //for (i = 0; i < size; ++i) { // dst[i / 8] |= byte_arr[i] << (i % 8); //} for (i = 0; i < size; i += 8) { char dst_tmp = 0; dst_tmp |= byte_arr[i + 0] << 0; dst_tmp |= byte_arr[i + 1] << 1; dst_tmp |= byte_arr[i + 2] << 2; dst_tmp |= byte_arr[i + 3] << 3; dst_tmp |= byte_arr[i + 4] << 4; dst_tmp |= byte_arr[i + 5] << 5; dst_tmp |= byte_arr[i + 6] << 6; dst_tmp |= byte_arr[i + 7] << 7; dst[i / 8] = dst_tmp; } free(byte_arr); } static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size) { int i; //#pragma omp parallel for for (i = 0; i<block_size; i++) { int j; for (j = 0; j<block_size; j++) { B[j*ldb + i] = A[i*lda + j]; } } } void transpose_block_SSE4x4(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += block_size) { int j, i2, j2; for (j = 0; j < m; j += block_size) { int max_i2 = i + block_size < n ? i + block_size : n; int max_j2 = j + block_size < m ? j + block_size : m; for (i2 = i; i2 < max_i2; ++i2) { for (j2 = j; j2 < max_j2; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { int b, k; const int w_offset = -pad / 2; const int h_offset = -pad / 2; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { for (j = 0; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); float val = (valid != 0) ? src[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? 
val : max; } } dst[out_index] = max; indexes[out_index] = max_i; } } } } } #endif // AVX // 32 channels -> 1 channel (with 32 floats) // 256 channels -> 8 channels (with 32 floats) void repack_input(float *input, float *re_packed_input, int w, int h, int c) { const int items_per_channel = w * h; int chan, i; for (chan = 0; chan < c; chan += 32) { for (i = 0; i < items_per_channel; ++i) { int c_pack; for (c_pack = 0; c_pack < 32; ++c_pack) { float src = input[(chan + c_pack)*items_per_channel + i]; re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src; } } } } void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align) { //l.bit_align - algined (n) by 32 //new_ldb - aligned (k) by 256 int i; //#pragma omp parallel for for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c; { int j; for (j = 0; j < src_w; j += 1) // out_h*out_w; { ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j]; } } } void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) // out_h*out_w; { float val = 0; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s]; PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int val += (2 * count - 32) * mean_val; } C[i*ldc + j] += val; } } } void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output, int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr) { int fil; // filter index #pragma omp parallel for for (fil = 0; fil < n; ++fil) { float mean_val = mean_arr[fil]; int chan, y, x, f_y, f_x; // c_pack // channel index for (chan = 0; chan < c / 32; ++chan) //for (chan = 0; chan < l.c; chan += 32) //for (c_pack = 0; c_pack < 32; ++c_pack) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; float sum = 0; // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; // normal //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x]; //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x]; // packed //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //sum += input * weight; //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //uint32_t bit1 = input > 0; //uint32_t bit2 = weight > 0; //uint32_t count = (~(bit1 ^ bit2)) & 1; //float result = (2 * (float)count - 1) * mean_val; //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result); //sum += result; uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x]; //uint32_t weight = ((uint32_t 
*)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x]; uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x]; uint32_t xnor_result = ~(input ^ weight); int32_t count = popcnt_32(xnor_result); // mandatory Signed int sum += (2 * count - 32) * mean_val; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ PUT_IN_REGISTER float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ PUT_IN_REGISTER float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); if (BETA != 1){ int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } } is_avx(); // initialize static variable if (is_fma_avx2() && !TA && !TB) { gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); } else { int t; #pragma omp parallel for for (t = 0; t < M; ++t) { if (!TA && !TB) gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc); else if (TA && !TB) gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc); else if (!TA && TB) gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc); else gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc); } } } #ifdef GPU #include <math.h> void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream()); CHECK_CUDA(stream_status); cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); CHECK_CUDA(status); } void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M)); float *B_gpu = cuda_make_array(B, (TB ? 
ldb*N : ldb*K)); float *C_gpu = cuda_make_array(C, ldc*M); gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc); cuda_pull_array(C_gpu, C, ldc*M); cuda_free(A_gpu); cuda_free(B_gpu); cuda_free(C_gpu); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_ongpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *a_cl = cuda_make_array(a, m*k); float *b_cl = cuda_make_array(b, k*n); float *c_cl = cuda_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); cudaDeviceSynchronize(); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *c_gpu = random_matrix(m,n); memset(c, 0, m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,192,729,1600); time_ongpu(0,0,384,196,1728); time_ongpu(0,0,256,196,3456); time_ongpu(0,0,256,196,2304); time_ongpu(0,0,128,4096,12544); time_ongpu(0,0,128,4096,4096); */ time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,576,12544); time_ongpu(0,0,256,2304,784); time_ongpu(1,1,2304,256,784); time_ongpu(0,0,512,4608,196); time_ongpu(1,1,4608,512,196); return 0; } #endif
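/*
 * Illustration (appended; not part of the original file above): the binary GEMM
 * kernels score a packed row/column pair with popcnt(~(a ^ b)) and rescale the
 * count as (2*count - K)*mean_val. With bits encoding signs (1 -> +1, 0 -> -1),
 * every matching bit contributes +1 and every mismatching bit -1 to the dot
 * product, and the "K - k < 64" correction in gemm_nn_custom_bin_mean_transposed
 * removes the padding bits of the last word. A minimal standalone check of that
 * identity, using the same GCC/Clang builtin as popcnt_64 above:
 */
#include <stdint.h>
#include <stdio.h>

static int xnor_dot64(uint64_t a, uint64_t b, int K) {
    int count = (int)__builtin_popcountll(~(a ^ b)); /* matches, incl. padding bits */
    count -= 64 - K;                                 /* drop the 64-K unused high bits */
    return 2 * count - K;                            /* +1 per match, -1 per mismatch */
}

int main(void) {
    uint64_t a = 0xB5, b = 0xA7; /* 8 meaningful sign bits each */
    int ref = 0, k;
    for (k = 0; k < 8; ++k) {
        int sa = ((a >> k) & 1) ? 1 : -1;
        int sb = ((b >> k) & 1) ? 1 : -1;
        ref += sa * sb; /* the +/-1 dot product the bit trick reproduces */
    }
    printf("xnor popcount: %d, reference: %d\n", xnor_dot64(a, b, 8), ref);
    return 0;
}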
Scan.h
/* This file is part of the implementation for the technical paper Field-Aligned Online Surface Reconstruction Nico Schertler, Marco Tarini, Wenzel Jakob, Misha Kazhdan, Stefan Gumhold, Daniele Panozzo ACM TOG 36, 4, July 2017 (Proceedings of SIGGRAPH 2017) Use of this source code is granted via a BSD-style license, which can be found in License.txt in the repository root. @author Nico Schertler */ #pragma once #include "osr/common.h" #include "osr/INeighborQueryable.h" #include "osr/HierarchyDecl.h" #include "osr/nanoflannForwardDeclare.h" #include "3rd/ICP.h" #include <nsessentials/math/Morton.h> #include <nsessentials/math/BoundingBox.h> #include <nsessentials/gui/GLBuffer.h> #include <nsessentials/gui/GLVertexArray.h> #include <nsessentials/util/TimedBlock.h> #include <random> #include <iostream> #include <memory> #include <map> namespace osr { class Scan; class OSR_EXPORT IScanRenderer { public: virtual void initialize(Scan& scan) = 0; virtual void updateData(const Scan& scan) = 0; virtual void draw(const Scan& scan, const Eigen::Matrix4f & v, const Eigen::Matrix4f & proj) const = 0; bool showInput; bool showNormals; }; //Represents data of a single scan class OSR_EXPORT Scan : public IPointQueryable<size_t> { public: Scan(const Matrix3Xf& V = Matrix3Xf(), const Matrix3Xf& N = Matrix3Xf(), const Matrix3Xus& C = Matrix3Xus(), const MatrixXu& F = MatrixXu(), const std::string& name = "unnamed", const Eigen::Affine3f& transform = Eigen::Affine3f::Identity()); ~Scan(); void initialize(); //Calculates the vertex normals if not already present. //If there are faces in the data set, uses averaged face normals. //Otherwise, uses PCA. PCA assumes normals to point towards the origin. void calculateNormals(); //Access to transformed attributes Vector3f p(size_t idx) const; //vertex position Vector3f n(size_t idx) const; //normal const std::string& getName() { return name; } const nse::math::BoundingBox<float, 3> boundingBox() const { return bbox; } nse::math::BoundingBox<float, 3> getTransformedBoundingBox() const; void updateData(); const Matrix3Xf& V() const { return mV; } Matrix3Xf& V() { return mV; } const Matrix3Xf& N() const { return mN; } Matrix3Xf& N() { return mN; } const Matrix3Xus& C() const { return mC; } Matrix3Xus& C() { return mC; } const MatrixXu& F() const { return mF; } MatrixXu& F() { return mF; } //Modifies the scan transform via ICP so as to register to other. template <typename Index> void alignTo(const IPointQueryable<Index>& other, int iterations = 20, double subsample = 0.1); //Removes all points that overlap the hierarchy (i.e. there is a point in the hierarchy with a distance of at most "distance"). 
void cleanOverlap(const THierarchy& hierarchy, float distance); const Eigen::Affine3f& transform() const { return mTransform; } Eigen::Affine3f& transform() { return mTransform; } std::shared_ptr<IScanRenderer> renderer; // ---------- nanoflann interface ---------- typedef nanoflann::KDTreeSingleIndexAdaptor< nanoflann::L2_Adaptor<float, Scan, float>, Scan, 3, size_t> KdTreeType; inline size_t kdtree_get_point_count() const { return mV.cols(); } inline float kdtree_distance(const float *p1, const size_t idx_p2, size_t size) const { float s = 0; for (size_t i = 0; i < size; ++i) { const float d = p1[i] - mV.coeff(i, idx_p2); s += d*d; } return s; } inline float kdtree_get_pt(const size_t idx, int dim) const { return mV.coeff(dim, idx); } template <class BBOX> bool kdtree_get_bbox(BBOX& bb) const { for (int i = 0; i < 3; ++i) { bb[i].low = bbox.min(i); bb[i].high = bbox.max(i); } return true; } // ---------- end nanoflann interface ---------- void buildTree(); Vector3f neighborP(const size_t& i) const { return mV.col(i); } //access to point position Vector3f neighborN(const size_t& i) const { return mN.col(i); }; //access to point normal bool isIndexValid(const size_t& idx) const { return idx < mV.cols(); } //Finds the closest point that has a similar normal as the provided one size_t findClosestCompatiblePoint(const Vector3f& p, const Vector3f& n) const; float closestPointRadius = 30; #ifdef USE_DAVIDVIVE struct { Eigen::Affine3f transformUncalibrated; //turntable + controller transform Eigen::Affine3f turntableRotation; Eigen::Affine3f davidToVive; } davidViveData; #endif private: KdTreeType* kdTree = nullptr; private: void calculateNormalsFromFaces(); void calculateNormalsPCA(); Matrix3Xf mV; //positions Matrix3Xf mN; //normals Matrix3Xus mC; //colors MatrixXu mF; //faces std::string name; nse::math::BoundingBox<float, 3> bbox; Eigen::Affine3f mTransform; }; template <typename Index> void Scan::alignTo(const IPointQueryable<Index>& other, int iterations, double subsample) { nse::util::TimedBlock b("Registering scan .."); std::vector<Index> correspondences(mV.cols()); //For each point, find the corresponding point in the other point cloud. #pragma omp parallel for for (int i = 0; i < mV.cols(); ++i) { if (std::isnan(mV.col(i).x())) continue; correspondences[i] = other.findClosestCompatiblePoint(mTransform * mV.col(i), mTransform.linear() * mN.col(i)); } //Distribute the points with a correspondence into normal buckets. std::map<nse::math::MortonCode64, std::vector<size_t>> normalBucketsMap; for (int i = 0; i < mV.cols(); ++i) { if (!std::isnan(mV.col(i).x()) && other.isIndexValid(correspondences[i])) { Vector3i discrete = (mN.col(i) * 10).cast<int>(); nse::math::MortonCode64 code(discrete.x(), discrete.y(), discrete.z()); normalBucketsMap[code].push_back(i); } } std::vector<std::vector<size_t>> normalBuckets; int potentialSamples = 0; for (auto& entry : normalBucketsMap) { potentialSamples += entry.second.size(); normalBuckets.push_back(std::move(entry.second)); } normalBucketsMap.clear(); if (potentialSamples < 10) { std::cout << "Could not find enough overlap. Registration will abort." 
<< std::endl; return; } int samples = (int)(potentialSamples * subsample); std::uniform_int_distribution<size_t> bucketDist(0, normalBuckets.size() - 1); std::mt19937 rnd; Matrix3Xf X(3, samples), N(3, samples); //subsample the point cloud for ICP for (int i = 0; i < samples; ++i) { size_t sample; if (subsample == 1) sample = i; else { //normal space sampling bool sampleOk = false; int attempt = 0; while (!sampleOk && attempt++ < 10) { auto bucketIdx = bucketDist(rnd); auto& bucket = normalBuckets[bucketIdx]; std::uniform_int_distribution<size_t> sampleDist(0, bucket.size() - 1); auto sampleIdx = sampleDist(rnd); sample = bucket[sampleIdx]; if (std::isnan(mV.coeff(0, sample)) || std::isnan(mN.coeff(0, sample))) continue; sampleOk = true; bucket.erase(bucket.begin() + sampleIdx); if (bucket.empty()) { normalBuckets.erase(normalBuckets.begin() + bucketIdx); bucketDist = std::uniform_int_distribution<size_t>(0, normalBuckets.size() - 1); } } } X.col(i) = mTransform * mV.col(sample); N.col(i) = mTransform.linear() * mN.col(sample); } //Run ICP SICP::Parameters params; params.p = 1.5; params.max_icp = iterations; params.max_outer = 10; params.max_inner = 1; Eigen::setNbThreads(0); mTransform = SICP::point_to_plane(X, N, other, params) * mTransform; Eigen::setNbThreads(1); } }
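/*
 * Sketch (appended; not from the header above): Scan::alignTo subsamples ICP
 * correspondences by "normal-space sampling" -- points are bucketed by their
 * quantized normal, via (n * 10).cast<int>() packed into a Morton code, and
 * samples are drawn bucket-by-bucket so large flat regions cannot dominate the
 * registration. The quantization step, shown with plain bit packing instead of
 * nse::math::MortonCode64 (the helper name below is hypothetical):
 */
#include <stdint.h>

/* Map a unit normal to a bucket key; each quantized coordinate lands in [-10, 10]. */
static uint64_t normal_bucket_key(float nx, float ny, float nz)
{
    uint64_t x = (uint64_t)((int)(nx * 10.0f) + 16); /* shift into the non-negative range [6, 26] */
    uint64_t y = (uint64_t)((int)(ny * 10.0f) + 16);
    uint64_t z = (uint64_t)((int)(nz * 10.0f) + 16);
    return (x << 42) | (y << 21) | z; /* unlike a Morton code, the bits are not interleaved */
}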
GB_unaryop__identity_int64_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_bool // op(A') function: GB_tran__identity_int64_bool // C type: int64_t // A type: bool // cast: int64_t cij = (int64_t) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_bool ( int64_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
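/*
 * Illustration (appended; not part of the generated file): for this
 * identity_int64_bool instance the GB_CAST_OP(p,p) macro chain expands to the
 * loop body below, which is exactly what the "parallel for" in
 * GB_unop__identity_int64_bool runs across nthreads threads:
 */
#include <stdbool.h>
#include <stdint.h>

static void unop_identity_int64_bool_expanded (int64_t *Cx, const bool *Ax, int64_t anz)
{
    int64_t p ;
    for (p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;         /* GB_GETA    */
        int64_t z = (int64_t) aij ; /* GB_CASTING */
        Cx [p] = z ;                /* GB_OP: identity */
    }
}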
ConvolutionUnfold.h
#pragma once #include <string.h> #include <math.h> #include <algorithm> #include "General.h" #include "TensorRef.h" #include "Vector-inl.h" OPS_API int TS_Unfolded_Copy( TensorRef* finput, TensorRef* input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight); OPS_API int TS_Unfolded_Acc( TensorRef *finput, TensorRef *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight); // note: due to write issues, this one cannot be parallelized as well as unfolded_copy template<typename T> void unfolded_acc( TensorRef *finput, TensorRef *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { size_t nip; T *input_data = (T*)input->buffer; T *finput_data = (T*)finput->buffer; #pragma omp parallel for private(nip) for (nip = 0; nip < nInputPlane; nip++) { size_t kw, kh, y, x; __int64 ix = 0, iy = 0; for (kh = 0; kh < kH; kh++) { for (kw = 0; kw < kW; kw++) { T *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); T *dst = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { size_t lpad, rpad; for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { } else { if (dW == 1) { ix = (__int64)(0 - padW + kw); lpad = std::max(size_t(0), (padW - kw)); rpad = std::max(size_t(0), (padW - (kW - kw - 1))); Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + lpad), src + (size_t)(y*outputWidth + lpad), 1, outputWidth - lpad - rpad); } else { for (x = 0; x<outputWidth; x++) { ix = (__int64)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) { } else Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth + x), 1, 1); } } } } } else { for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH + kh); ix = (__int64)(0 + kw); if (dW == 1) Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth), 1, outputWidth); else { for (x = 0; x < outputWidth; x++) Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + x*dW), src + (size_t)(y*outputWidth + x), 1, 1); } } } } } } } template<typename T> void unfolded_copy(TensorRef *finput, TensorRef *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { long k; T *input_data = (T*)input->buffer; T *finput_data = (T*)finput->buffer; #pragma omp parallel for private(k) for (k = 0; k < nInputPlane*kH*kW; k++) { size_t nip = k / (kH*kW); size_t rest = k % (kH*kW); size_t kh = rest / kW; size_t kw = rest % kW; size_t x, y; __int64 ix, iy; T *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); T *src = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { size_t lpad, rpad; for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { memset(dst + y*outputWidth, 0, sizeof(T)*outputWidth); } else { if (dW == 1) { ix = (__int64)(0 - padW + kw); lpad = std::max(size_t(0), (padW - kw)); rpad = std::max(size_t(0), (padW - (kW - kw - 1))); if (outputWidth - rpad - lpad <= 0) { memset(dst + (size_t)(y*outputWidth), 0, sizeof(T)*outputWidth); } else { if (lpad > 0) memset(dst + y*outputWidth, 0, sizeof(T)*lpad); memcpy(dst + 
(size_t)(y*outputWidth + lpad), src + (size_t)(iy*inputWidth + ix + lpad), sizeof(T)*(outputWidth - rpad - lpad)); if (rpad > 0) memset(dst + y*outputWidth + outputWidth - rpad, 0, sizeof(T)*rpad); } } else { for (x = 0; x<outputWidth; x++) { ix = (__int64)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) memset(dst + (size_t)(y*outputWidth + x), 0, sizeof(T) * 1); else memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix), sizeof(T)*(1)); } } } } } else { for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH + kh); ix = (__int64)(0 + kw); if (dW == 1) memcpy(dst + (size_t)(y*outputWidth), src + (size_t)(iy*inputWidth + ix), sizeof(T)*outputWidth); else { for (x = 0; x<outputWidth; x++) memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix + x*dW), sizeof(T)*(1)); } } } } }
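/*
 * Reference sketch (appended; not part of the header): unfolded_copy performs
 * im2col -- for every (plane, kh, kw) triple it writes one shifted copy of the
 * input plane into a row of finput, zero-filling pixels that fall into the
 * padding. The same index arithmetic without the memcpy/Vector_add fast paths,
 * specialized to float for brevity:
 */
#include <stddef.h>

static void unfolded_copy_scalar(float *finput, const float *input,
    int kW, int kH, int dW, int dH, int padW, int padH,
    int nInputPlane, int inputWidth, int inputHeight,
    int outputWidth, int outputHeight)
{
    int k;
    for (k = 0; k < nInputPlane * kH * kW; k++) {
        int nip = k / (kH * kW), rest = k % (kH * kW);
        int kh = rest / kW, kw = rest % kW;
        /* row k of the unfolded buffer, and its source plane */
        float *dst = finput + (size_t)k * outputHeight * outputWidth;
        const float *src = input + (size_t)nip * inputHeight * inputWidth;
        int y, x;
        for (y = 0; y < outputHeight; y++) {
            for (x = 0; x < outputWidth; x++) {
                int iy = y * dH - padH + kh; /* source row, may fall in the pad */
                int ix = x * dW - padW + kw; /* source column, may fall in the pad */
                dst[y * outputWidth + x] =
                    (iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
                        ? 0.0f : src[iy * inputWidth + ix];
            }
        }
    }
}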
strassen.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /**********************************************************************************************/ /* * Copyright (c) 1996 Massachusetts Institute of Technology * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to use, copy, modify, and distribute the Software without * restriction, provided the Software, including any modified copies made * under this license, is not distributed for a fee, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Except as contained in this notice, the name of the Massachusetts * Institute of Technology shall not be used in advertising or otherwise * to promote the sale, use or other dealings in this Software without * prior written authorization from the Massachusetts Institute of * Technology. * */ #include <stdlib.h> #include "strassen.h" #include "main.h" #include "timer.h" /***************************************************************************** ** ** FastNaiveMatrixMultiply ** ** For small to medium sized matrices A, B, and C of size ** MatrixSize * MatrixSize this function performs the operation ** C = A x B efficiently. ** ** Note MatrixSize must be divisible by 8. ** ** INPUT: ** C = (*C WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. ** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** ** OUTPUT: ** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
** *****************************************************************************/ static void FastNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB) { /* Assumes size of real is 8 bytes */ PTR RowWidthBInBytes = RowWidthB << 3; PTR RowWidthAInBytes = RowWidthA << 3; PTR MatrixWidthInBytes = MatrixSize << 3; PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3; unsigned Horizontal, Vertical; REAL *ARowStart = A; for (Vertical = 0; Vertical < MatrixSize; Vertical++) { for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) { REAL *BColumnStart = B + Horizontal; REAL FirstARowValue = *ARowStart++; REAL Sum0 = FirstARowValue * (*BColumnStart); REAL Sum1 = FirstARowValue * (*(BColumnStart+1)); REAL Sum2 = FirstARowValue * (*(BColumnStart+2)); REAL Sum3 = FirstARowValue * (*(BColumnStart+3)); REAL Sum4 = FirstARowValue * (*(BColumnStart+4)); REAL Sum5 = FirstARowValue * (*(BColumnStart+5)); REAL Sum6 = FirstARowValue * (*(BColumnStart+6)); REAL Sum7 = FirstARowValue * (*(BColumnStart+7)); unsigned Products; for (Products = 1; Products < MatrixSize; Products++) { REAL ARowValue = *ARowStart++; BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes); Sum0 += ARowValue * (*BColumnStart); Sum1 += ARowValue * (*(BColumnStart+1)); Sum2 += ARowValue * (*(BColumnStart+2)); Sum3 += ARowValue * (*(BColumnStart+3)); Sum4 += ARowValue * (*(BColumnStart+4)); Sum5 += ARowValue * (*(BColumnStart+5)); Sum6 += ARowValue * (*(BColumnStart+6)); Sum7 += ARowValue * (*(BColumnStart+7)); } ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes); *(C) = Sum0; *(C+1) = Sum1; *(C+2) = Sum2; *(C+3) = Sum3; *(C+4) = Sum4; *(C+5) = Sum5; *(C+6) = Sum6; *(C+7) = Sum7; C+=8; } ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes ); C = (REAL*) ( ((PTR) C) + RowIncrementC ); } } /***************************************************************************** ** ** FastAdditiveNaiveMatrixMultiply ** ** For small to medium sized matrices A, B, and C of size ** MatrixSize * MatrixSize this function performs the operation ** C += A x B efficiently. ** ** Note MatrixSize must be divisible by 8. ** ** INPUT: ** C = (*C READ/WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. ** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** ** OUTPUT: ** C = (*C READ/WRITE) Matrix C contains C + A x B. 
** *****************************************************************************/ static void FastAdditiveNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB) { /* Assumes size of real is 8 bytes */ PTR RowWidthBInBytes = RowWidthB << 3; PTR RowWidthAInBytes = RowWidthA << 3; PTR MatrixWidthInBytes = MatrixSize << 3; PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3; unsigned Horizontal, Vertical; REAL *ARowStart = A; for (Vertical = 0; Vertical < MatrixSize; Vertical++) { for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) { REAL *BColumnStart = B + Horizontal; REAL Sum0 = *C; REAL Sum1 = *(C+1); REAL Sum2 = *(C+2); REAL Sum3 = *(C+3); REAL Sum4 = *(C+4); REAL Sum5 = *(C+5); REAL Sum6 = *(C+6); REAL Sum7 = *(C+7); unsigned Products; for (Products = 0; Products < MatrixSize; Products++) { REAL ARowValue = *ARowStart++; Sum0 += ARowValue * (*BColumnStart); Sum1 += ARowValue * (*(BColumnStart+1)); Sum2 += ARowValue * (*(BColumnStart+2)); Sum3 += ARowValue * (*(BColumnStart+3)); Sum4 += ARowValue * (*(BColumnStart+4)); Sum5 += ARowValue * (*(BColumnStart+5)); Sum6 += ARowValue * (*(BColumnStart+6)); Sum7 += ARowValue * (*(BColumnStart+7)); BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes); } ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes); *(C) = Sum0; *(C+1) = Sum1; *(C+2) = Sum2; *(C+3) = Sum3; *(C+4) = Sum4; *(C+5) = Sum5; *(C+6) = Sum6; *(C+7) = Sum7; C+=8; } ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes ); C = (REAL*) ( ((PTR) C) + RowIncrementC ); } } /***************************************************************************** ** ** MultiplyByDivideAndConquer ** ** For medium to medium-large (would you like fries with that) sized ** matrices A, B, and C of size MatrixSize * MatrixSize this function ** efficiently performs the operation ** C = A x B (if AdditiveMode == 0) ** C += A x B (if AdditiveMode != 0) ** ** Note MatrixSize must be divisible by 16. ** ** INPUT: ** C = (*C READ/WRITE) Address of top left element of matrix C. ** A = (*A IS READ ONLY) Address of top left element of matrix A. ** B = (*B IS READ ONLY) Address of top left element of matrix B. ** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n) ** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1] ** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1] ** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1] ** AdditiveMode = 0 if we want C = A x B, otherwise we'll do C += A x B ** ** OUTPUT: ** C (+)= A x B. 
(+ if AdditiveMode != 0)
**
*****************************************************************************/
void MultiplyByDivideAndConquer(REAL *C, REAL *A, REAL *B,
                                unsigned MatrixSize,
                                unsigned RowWidthC,
                                unsigned RowWidthA,
                                unsigned RowWidthB,
                                int AdditiveMode
                               )
{
  REAL *A01, *A10, *A11, *B01, *B10, *B11, *C01, *C10, *C11;
  unsigned QuadrantSize = MatrixSize >> 1;

  /* partition the matrix */
  A01 = A + QuadrantSize;
  A10 = A + RowWidthA * QuadrantSize;
  A11 = A10 + QuadrantSize;
  B01 = B + QuadrantSize;
  B10 = B + RowWidthB * QuadrantSize;
  B11 = B10 + QuadrantSize;
  C01 = C + QuadrantSize;
  C10 = C + RowWidthC * QuadrantSize;
  C11 = C10 + QuadrantSize;

  if (QuadrantSize > SizeAtWhichNaiveAlgorithmIsMoreEfficient) {
    MultiplyByDivideAndConquer(C, A, B, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, AdditiveMode);
    MultiplyByDivideAndConquer(C01, A, B01, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, AdditiveMode);
    MultiplyByDivideAndConquer(C11, A10, B01, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, AdditiveMode);
    MultiplyByDivideAndConquer(C10, A10, B, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, AdditiveMode);
    MultiplyByDivideAndConquer(C, A01, B10, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, 1);
    MultiplyByDivideAndConquer(C01, A01, B11, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, 1);
    MultiplyByDivideAndConquer(C11, A11, B11, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, 1);
    MultiplyByDivideAndConquer(C10, A11, B10, QuadrantSize,
                               RowWidthC, RowWidthA, RowWidthB, 1);
  } else {
    if (AdditiveMode) {
      FastAdditiveNaiveMatrixMultiply(C, A, B, QuadrantSize,
                                      RowWidthC, RowWidthA, RowWidthB);
      FastAdditiveNaiveMatrixMultiply(C01, A, B01, QuadrantSize,
                                      RowWidthC, RowWidthA, RowWidthB);
      FastAdditiveNaiveMatrixMultiply(C11, A10, B01, QuadrantSize,
                                      RowWidthC, RowWidthA, RowWidthB);
      FastAdditiveNaiveMatrixMultiply(C10, A10, B, QuadrantSize,
                                      RowWidthC, RowWidthA, RowWidthB);
    } else {
      FastNaiveMatrixMultiply(C, A, B, QuadrantSize,
                              RowWidthC, RowWidthA, RowWidthB);
      FastNaiveMatrixMultiply(C01, A, B01, QuadrantSize,
                              RowWidthC, RowWidthA, RowWidthB);
      FastNaiveMatrixMultiply(C11, A10, B01, QuadrantSize,
                              RowWidthC, RowWidthA, RowWidthB);
      FastNaiveMatrixMultiply(C10, A10, B, QuadrantSize,
                              RowWidthC, RowWidthA, RowWidthB);
    }
    FastAdditiveNaiveMatrixMultiply(C, A01, B10, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
    FastAdditiveNaiveMatrixMultiply(C01, A01, B11, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
    FastAdditiveNaiveMatrixMultiply(C11, A11, B11, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
    FastAdditiveNaiveMatrixMultiply(C10, A11, B10, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
  }
  return;
}

/*
 * Set an n by n matrix A to random values. The distance between
 * rows (the row stride) is an.
 */
static void init_matrix(int n, REAL *A, int an, unsigned int bs)
{
    int i, j;
#pragma omp parallel
#pragma omp master
    for (i = 0; i < n; i+=bs)
        for (j = 0; j < n; j+=bs)
        {
#pragma omp task firstprivate(i,j,bs,an)
            {
                unsigned int seed = rand();
                int ii, jj;
                /* each task fills only its own bs x bs tile */
                for (ii = i; ii < i+bs; ++ii)
                    for (jj = j; jj < j+bs; ++jj)
                        ELEM(A, an, ii, jj) = ((double) rand_r(&seed) / RAND_MAX);
            }
        }
}

/*
 * Compare two matrices. Print an error message if they differ by
 * more than EPSILON.
*/ static int compare_matrix(int n, REAL *A, int an, REAL *B, int bn) { int i, j; REAL c; for (i = 0; i < n; ++i) for (j = 0; j < n; ++j) { /* compute the relative error c */ c = ELEM(A, an, i, j) - ELEM(B, bn, i, j); if (c < 0.0) c = -c; c = c / ELEM(A, an, i, j); // printf("C: %f A: %f B: %f\n", c, ELEM(A, an, i, j), ELEM(B, bn, i, j)); if (c > EPSILON) return 0; } return 1; } void matrix_multiply(double* A, double* B, double* C, int matrix_size) { int i, j, k; for(i=0; i<matrix_size; i++) for(j=0; j<matrix_size; j++) { double res = 0; for(k=0; k<matrix_size; k++) res += A[i * matrix_size + k] * B[k * matrix_size + j]; C[i * matrix_size + j] = res; } } double run(struct user_parameters* params) { double *A, *B, *C; int matrix_size = params->matrix_size; int cutoff_size = params->cutoff_size; int cutoff_depth = params->cutoff_depth; if (matrix_size <= 0) { matrix_size = 256; params->matrix_size = matrix_size; } if (cutoff_size <= 0) { cutoff_size = 64; params->cutoff_size = cutoff_size; } if (cutoff_depth <= 0) { cutoff_depth = 4; params->cutoff_depth = cutoff_depth; } A = (double *) malloc (matrix_size * matrix_size * sizeof(double)); B = (double *) malloc (matrix_size * matrix_size * sizeof(double)); C = (double *) malloc (matrix_size * matrix_size * sizeof(double)); init_matrix(matrix_size,A,matrix_size, matrix_size/8); init_matrix(matrix_size,B,matrix_size, matrix_size/8); /// KERNEL INTENSIVE COMPUTATION START_TIMER; #ifndef _OPENMP strassen_main_seq(C, A, B, matrix_size, cutoff_size); #else strassen_main_par(C, A, B, matrix_size, cutoff_size, cutoff_depth); #endif END_TIMER; if(params->check) { double *D = (double *) malloc (matrix_size * matrix_size * sizeof(double)); matrix_multiply(A, B, D, matrix_size); params->succeed = compare_matrix(matrix_size, C, matrix_size, D, matrix_size); free(D); } free(A); free(B); free(C); return TIMER; }
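/*
 * Editor's illustrative sketch, not part of the original benchmark: a tiny
 * driver showing how the divide-and-conquer entry point above is called and
 * checked against the naive matrix_multiply(). It assumes REAL is double and
 * that SizeAtWhichNaiveAlgorithmIsMoreEfficient is defined in the benchmark's
 * headers (both are assumptions; neither is visible in this file). The guard
 * macro STRASSEN_EXAMPLE is hypothetical and only keeps the sketch out of
 * normal builds.
 */
#ifdef STRASSEN_EXAMPLE
static int example_divide_and_conquer(void)
{
    enum { N = 32 };  /* divisible by 16, so quadrants stay divisible by 8 */
    REAL *A = (REAL *) malloc(N * N * sizeof(REAL));
    REAL *B = (REAL *) malloc(N * N * sizeof(REAL));
    REAL *C = (REAL *) malloc(N * N * sizeof(REAL));
    double *D = (double *) malloc(N * N * sizeof(double));
    int i, ok = 1;
    /* small integer values keep every double sum exact, so the two
     * computations can be compared for exact equality */
    for (i = 0; i < N * N; i++) { A[i] = i % 7; B[i] = i % 5; }
    /* contiguous storage: row width equals matrix size for all operands */
    MultiplyByDivideAndConquer(C, A, B, N, N, N, N, /* AdditiveMode = */ 0);
    matrix_multiply(A, B, D, N);  /* reference result */
    for (i = 0; i < N * N; i++)
        ok = ok && (C[i] == D[i]);
    free(A); free(B); free(C); free(D);
    return ok;
}
#endif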
ompcompress.c
#ifdef _OPENMP /* compress 1d contiguous array in parallel */ static void _t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field) { /* array metadata */ const Scalar* data = (const Scalar*)field->data; uint nx = field->nx; /* number of omp threads, blocks, and chunks */ uint threads = thread_count_omp(stream); uint blocks = (nx + 3) / 4; uint chunks = chunk_count_omp(stream, blocks, threads); /* allocate per-thread streams */ bitstream** bs = compress_init_par(stream, field, chunks, blocks); if (!bs) return; /* compress chunks of blocks in parallel */ int chunk; #pragma omp parallel for num_threads(threads) for (chunk = 0; chunk < (int)chunks; chunk++) { /* determine range of block indices assigned to this thread */ uint bmin = chunk_offset(blocks, chunks, chunk + 0); uint bmax = chunk_offset(blocks, chunks, chunk + 1); uint block; /* set up thread-local bit stream */ zfp_stream s = *stream; zfp_stream_set_bit_stream(&s, bs[chunk]); /* compress sequence of blocks */ for (block = bmin; block < bmax; block++) { /* determine block origin x within array */ const Scalar* p = data; uint x = 4 * block; p += x; /* compress partial or full block */ if (nx - x < 4) _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), 1); else _t2(zfpns.zfp_encode_block, Scalar, 1)(&s, p); } } /* concatenate per-thread streams */ compress_finish_par(stream, bs, chunks); } /* compress 1d strided array in parallel */ static void _t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field) { /* array metadata */ const Scalar* data = (const Scalar*)field->data; uint nx = field->nx; int sx = field->sx ? field->sx : 1; /* number of omp threads, blocks, and chunks */ uint threads = thread_count_omp(stream); uint blocks = (nx + 3) / 4; uint chunks = chunk_count_omp(stream, blocks, threads); /* allocate per-thread streams */ bitstream** bs = compress_init_par(stream, field, chunks, blocks); if (!bs) return; /* compress chunks of blocks in parallel */ int chunk; #pragma omp parallel for num_threads(threads) for (chunk = 0; chunk < (int)chunks; chunk++) { /* determine range of block indices assigned to this thread */ uint bmin = chunk_offset(blocks, chunks, chunk + 0); uint bmax = chunk_offset(blocks, chunks, chunk + 1); uint block; /* set up thread-local bit stream */ zfp_stream s = *stream; zfp_stream_set_bit_stream(&s, bs[chunk]); /* compress sequence of blocks */ for (block = bmin; block < bmax; block++) { /* determine block origin x within array */ const Scalar* p = data; uint x = 4 * block; p += sx * (ptrdiff_t)x; /* compress partial or full block */ if (nx - x < 4) _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), sx); else _t2(zfpns.zfp_encode_block_strided, Scalar, 1)(&s, p, sx); } } /* concatenate per-thread streams */ compress_finish_par(stream, bs, chunks); } /* compress 2d strided array in parallel */ static void _t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field) { /* array metadata */ const Scalar* data = (const Scalar*)field->data; uint nx = field->nx; uint ny = field->ny; int sx = field->sx ? field->sx : 1; int sy = field->sy ? 
field->sy : (int)nx; /* number of omp threads, blocks, and chunks */ uint threads = thread_count_omp(stream); uint bx = (nx + 3) / 4; uint by = (ny + 3) / 4; uint blocks = bx * by; uint chunks = chunk_count_omp(stream, blocks, threads); /* allocate per-thread streams */ bitstream** bs = compress_init_par(stream, field, chunks, blocks); if (!bs) return; /* compress chunks of blocks in parallel */ int chunk; #pragma omp parallel for num_threads(threads) for (chunk = 0; chunk < (int)chunks; chunk++) { /* determine range of block indices assigned to this thread */ uint bmin = chunk_offset(blocks, chunks, chunk + 0); uint bmax = chunk_offset(blocks, chunks, chunk + 1); uint block; /* set up thread-local bit stream */ zfp_stream s = *stream; zfp_stream_set_bit_stream(&s, bs[chunk]); /* compress sequence of blocks */ for (block = bmin; block < bmax; block++) { /* determine block origin (x, y) within array */ const Scalar* p = data; uint b = block; uint x, y; x = 4 * (b % bx); b /= bx; y = 4 * b; p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y; /* compress partial or full block */ if (nx - x < 4 || ny - y < 4) _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy); else _t2(zfpns.zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy); } } /* concatenate per-thread streams */ compress_finish_par(stream, bs, chunks); } /* compress 3d strided array in parallel */ static void _t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field) { /* array metadata */ const Scalar* data = (const Scalar*)field->data; uint nx = field->nx; uint ny = field->ny; uint nz = field->nz; int sx = field->sx ? field->sx : 1; int sy = field->sy ? field->sy : (int)nx; int sz = field->sz ? field->sz : (int)(nx * ny); /* number of omp threads, blocks, and chunks */ uint threads = thread_count_omp(stream); uint bx = (nx + 3) / 4; uint by = (ny + 3) / 4; uint bz = (nz + 3) / 4; uint blocks = bx * by * bz; uint chunks = chunk_count_omp(stream, blocks, threads); /* allocate per-thread streams */ bitstream** bs = compress_init_par(stream, field, chunks, blocks); if (!bs) return; /* compress chunks of blocks in parallel */ int chunk; #pragma omp parallel for num_threads(threads) for (chunk = 0; chunk < (int)chunks; chunk++) { /* determine range of block indices assigned to this thread */ uint bmin = chunk_offset(blocks, chunks, chunk + 0); uint bmax = chunk_offset(blocks, chunks, chunk + 1); uint block; /* set up thread-local bit stream */ zfp_stream s = *stream; zfp_stream_set_bit_stream(&s, bs[chunk]); /* compress sequence of blocks */ for (block = bmin; block < bmax; block++) { /* determine block origin (x, y, z) within array */ const Scalar* p = data; uint b = block; uint x, y, z; x = 4 * (b % bx); b /= bx; y = 4 * (b % by); b /= by; z = 4 * b; p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z; /* compress partial or full block */ if (nx - x < 4 || ny - y < 4 || nz - z < 4) _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz); else _t2(zfpns.zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz); } } /* concatenate per-thread streams */ compress_finish_par(stream, bs, chunks); } /* compress 4d strided array in parallel */ static void _t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field) { /* array metadata */ const Scalar* data = field->data; uint nx = field->nx; uint ny = field->ny; uint nz = field->nz; uint nw = field->nw; int sx = field->sx ? 
field->sx : 1; int sy = field->sy ? field->sy : (int)nx; int sz = field->sz ? field->sz : (int)(nx * ny); int sw = field->sw ? field->sw : (int)(nx * ny * nz); /* number of omp threads, blocks, and chunks */ uint threads = thread_count_omp(stream); uint bx = (nx + 3) / 4; uint by = (ny + 3) / 4; uint bz = (nz + 3) / 4; uint bw = (nw + 3) / 4; uint blocks = bx * by * bz * bw; uint chunks = chunk_count_omp(stream, blocks, threads); /* allocate per-thread streams */ bitstream** bs = compress_init_par(stream, field, chunks, blocks); if (!bs) return; /* compress chunks of blocks in parallel */ int chunk; #pragma omp parallel for num_threads(threads) for (chunk = 0; chunk < (int)chunks; chunk++) { /* determine range of block indices assigned to this thread */ uint bmin = chunk_offset(blocks, chunks, chunk + 0); uint bmax = chunk_offset(blocks, chunks, chunk + 1); uint block; /* set up thread-local bit stream */ zfp_stream s = *stream; zfp_stream_set_bit_stream(&s, bs[chunk]); /* compress sequence of blocks */ for (block = bmin; block < bmax; block++) { /* determine block origin (x, y, z, w) within array */ const Scalar* p = data; uint b = block; uint x, y, z, w; x = 4 * (b % bx); b /= bx; y = 4 * (b % by); b /= by; z = 4 * (b % bz); b /= bz; w = 4 * b; p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w; /* compress partial or full block */ if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4) _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw); else _t2(zfpns.zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw); } } /* concatenate per-thread streams */ compress_finish_par(stream, bs, chunks); } #endif
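/*
 * Editor's illustrative sketch, not part of zfp: every kernel above decodes a
 * linear block index into a d-dimensional block origin with the same
 * mixed-radix scheme (x varies fastest), and treats a block as partial when
 * fewer than 4 samples remain along some axis. The standalone 3D demo below
 * shows just that decoding; the guard macro ZFP_BLOCK_DEMO is hypothetical.
 */
#ifdef ZFP_BLOCK_DEMO
#include <stdio.h>
static void demo_block_origin_3d(void)
{
  unsigned nx = 10, ny = 6, nz = 5;  /* array dimensions */
  unsigned bx = (nx + 3) / 4;        /* blocks per axis, as in the kernels */
  unsigned by = (ny + 3) / 4;
  unsigned bz = (nz + 3) / 4;
  unsigned blocks = bx * by * bz;
  unsigned block;
  for (block = 0; block < blocks; block++) {
    unsigned b = block, x, y, z;
    x = 4 * (b % bx); b /= bx;       /* x varies fastest */
    y = 4 * (b % by); b /= by;
    z = 4 * b;                       /* the remainder indexes z */
    printf("block %2u -> origin (%u, %u, %u)%s\n", block, x, y, z,
           (nx - x < 4 || ny - y < 4 || nz - z < 4) ? " [partial]" : "");
  }
}
#endif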
GB_unaryop__lnot_int8_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int8_uint16 // op(A') function: GB_tran__lnot_int8_uint16 // C type: int8_t // A type: uint16_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int8_uint16 ( int8_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int8_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
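/*
 * Editor's illustrative sketch, not generated code: with the macros defined
 * above, the parallel loop in GB_unop__lnot_int8_uint16 expands to
 * essentially the following kernel -- read the uint16_t entry, cast it to
 * int8_t, then apply the logical negation. The guard macro
 * GB_UNOP_EXPANSION_DEMO is hypothetical.
 */
#ifdef GB_UNOP_EXPANSION_DEMO
static void GB_unop__lnot_int8_uint16_expanded
(
    int8_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;     /* GB_GETA */
        int8_t x = (int8_t) aij ;   /* GB_CASTING */
        Cx [p] = !(x != 0) ;        /* GB_OP applied to GB_CX(p) */
    }
}
#endif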
DRB052-indirectaccesssharebase-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This example is to mimic a memory access pattern extracted from an LLNL proxy app. Two pointers have distance of 12. They are used as base addresses of two arrays, indexed through an index set. The index set has no two indices with distance of 12. So there is no loop carried dependence. 
*/ #include "omprace.h" #include <omp.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #define N 180 int indexSet[N] = { 521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 921, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013}; int main (int argc, char* argv[]) { omprace_init(); double * base = (double*) malloc(sizeof(double)* (2013+12+1)); if (base == 0) { printf("Error, malloc() returns NULL. End execution. \n"); return 1; } double * xa1 = base; double * xa2 = base + 12; int i; for (i =521; i<= 2025; ++i) { base[i]=0.0; } #pragma omp parallel for for (i =0; i< N; ++i) // this level of loop has no loop carried dependence { int idx = indexSet[i]; xa1[idx]+= 1.0; xa2[idx]+= 3.0; } // verify the results, no overlapping of xa1 vs. xa2, no addition happens to the same element twice for (i =521; i<= 2025; ++i) { //printf ("%f ", base[i]); assert (base[i]!=4.0); } free (base); omprace_fini(); return 0; }
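/*
 * Editor's note, not part of DataRaceBench: the race-freedom claim above
 * rests on the property that no two entries of indexSet differ by exactly 12,
 * the distance between the xa1 and xa2 base pointers. The helper below checks
 * that property directly; the guard macro DRB052_CHECK_INDEXSET is
 * hypothetical.
 */
#ifdef DRB052_CHECK_INDEXSET
static int indexSet_pairs_with_distance_12(void)
{
  int i, j, pairs = 0;
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      if (indexSet[i] - indexSet[j] == 12)
        pairs++;  /* xa1[indexSet[i]] would alias xa2[indexSet[j]] */
  return pairs;   /* 0: the parallel loop above carries no dependence */
}
#endif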
GB_binop__plus_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_08__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_04__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int8) // A*D function (colscale): GB (_AxD__plus_int8) // D*A function (rowscale): GB (_DxB__plus_int8) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int8) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int8) // C=scalar+B GB (_bind1st__plus_int8) // C=scalar+B' GB (_bind1st_tran__plus_int8) // C=A+scalar GB (_bind2nd__plus_int8) // C=A'+scalar GB (_bind2nd_tran__plus_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__plus_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
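/*
 * Editor's illustrative sketch, not generated code: for a full, non-iso
 * matrix B -- where GBB (Bb, p) never skips an entry and GBX (Bx, p, false)
 * reads Bx [p] (this reading of the two macros is an assumption, stated here
 * rather than taken from this file) -- GB (_bind1st__plus_int8) reduces to a
 * plain scalar-plus-array kernel. The guard macro GB_BIND1ST_DEMO is
 * hypothetical.
 */
#ifdef GB_BIND1ST_DEMO
static void bind1st_plus_int8_full
(
    int8_t *Cx,
    int8_t x,
    const int8_t *Bx,
    int64_t bnz,
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        Cx [p] = (int8_t) (x + Bx [p]) ;   /* GB_BINOP: z = (x + y) */
    }
}
#endif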
DRB017-outputdep-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The loop in this example cannot be parallelized. Data race pairs: we allow two pairs to preserve the original code pattern. 1. x@71:12 vs. x@72:5 2. x@72:5 vs. x@72:5 */ #include <stdio.h> #include <stdlib.h> int main(int argc, char* argv[]) { int len=100; if (argc>1) len = atoi(argv[1]); int a[len]; int i,x=10; #pragma omp parallel for firstprivate(len ,a ,i ) lastprivate(i ) for (i=0;i<len;i++) { a[i] = x; x=i; } printf("x=%d, a[0]=%d\n",x,a[0]); return 0; }
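/*
 * Editor's note, not part of DataRaceBench: the race exists because every
 * iteration both reads the shared x (into a[i]) and writes it. Serially the
 * loop computes a[0] = 10 and a[i] = i-1 for i >= 1, leaving x == len-1, so
 * the cross-iteration dependence can be removed entirely. A sketch of that
 * race-free rewrite (the guard macro DRB017_RACE_FREE is hypothetical):
 */
#ifdef DRB017_RACE_FREE
static void race_free_variant(int len)
{
  int a[len];
  int i, x;
#pragma omp parallel for
  for (i = 0; i < len; i++)
    a[i] = (i == 0) ? 10 : i - 1;  /* the value x holds entering iteration i */
  x = (len > 0) ? len - 1 : 10;    /* final value of x after the serial loop */
  printf("x=%d, a[0]=%d\n", x, a[0]);
}
#endif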
par_csr_matop_device.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" #include "_hypre_utilities.hpp" #if defined(HYPRE_USING_CUDA) HYPRE_Int hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_Int *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j; HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv; HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i; HYPRE_BigInt *d_send_j, *d_recv_j; HYPRE_Int *send_jstarts, *recv_jstarts; HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE); d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE); send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE); /* fill the send array with row lengths */ hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, 
d_send_i+1); /* send array send_i out: deviceTohost first and MPI (async) * note the shift in recv_i by one */ hypre_TMemcpy(send_i, d_send_i+1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1); hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i); /* total number of nnz to send */ hypre_TMemcpy(&num_nnz_send, d_send_i+num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); /* prepare data to send out. overlap with the above commmunication */ d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE); if (want_data) { d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE); } if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */ hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, d_send_i, d_send_j, d_send_a); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); send_jstarts[0] = 0; for (i = 1; i <= num_sends; i++) { send_jstarts[i] = send_jstarts[i-1]; for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i-1); j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); j++ ) { send_jstarts[i] += send_i[j]; } } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ recv_i[0] = 0; for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i-1]; } num_nnz_recv = recv_i[num_rows_recv]; /* allocate device memory for j and a */ d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE); if (want_data) { d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE); } recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); recv_jstarts[0] = 0; for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_j, HYPRE_MEMORY_DEVICE, d_recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_a, HYPRE_MEMORY_DEVICE, d_recv_a); } else { comm_handle_a = NULL; } hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create A_ext: on device */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixI (A_ext) 
= d_recv_i; hypre_CSRMatrixBigJ(A_ext) = d_recv_j; hypre_CSRMatrixData(A_ext) = d_recv_a; hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(recv_i, HYPRE_MEMORY_HOST); hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE); hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; HYPRE_BigInt *send_j = comm_handle_j ? (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL; HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL; hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_j, HYPRE_MEMORY_DEVICE); hypre_TFree(send_a, HYPRE_MEMORY_DEVICE); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } hypre_CSRMatrix* hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt glbal_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); hypre_CSRMatrix *B; HYPRE_Int B_nrows = local_num_rows; HYPRE_BigInt B_ncols = glbal_num_cols; HYPRE_Int *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_j; HYPRE_Complex *B_a; HYPRE_Int B_nnz; HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i+1); hypreDevice_IntegerInclusiveScan(B_nrows+1, B_i); /* total number of nnz */ hypre_TMemcpy(&B_nnz, B_i+B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE); B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, 
A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, B_i, B_j, B_a); /* output */ B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI (B) = B_i; hypre_CSRMatrixBigJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; hypre_SyncCudaComputeStream(hypre_handle()); return B; } HYPRE_Int hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, void **request_ptr) { MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext); HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext); HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext); HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext); HYPRE_Int B_ext_nrows = hypre_CSRMatrixNumRows(B_ext); HYPRE_Int B_ext_nnz = hypre_CSRMatrixNumNonzeros(B_ext); HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST); HYPRE_Int *B_ext_i_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST); hypre_assert(num_elmts_recv == B_ext_nrows); /* output matrix */ hypre_CSRMatrix *B_int_d; HYPRE_Int B_int_nrows = num_elmts_send; HYPRE_Int B_int_ncols = B_ext_ncols; HYPRE_Int *B_int_i_h = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST); HYPRE_Int *B_int_i_d = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_int_j_d = NULL; HYPRE_Complex *B_int_a_d = NULL; HYPRE_Int B_int_nnz; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; hypre_ParCSRCommPkg *comm_pkg_j; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i; HYPRE_Int num_procs, my_id; void **vrequest; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * B_ext_rownnz contains the number of elements of row j * (to be determined through send_map_elmnts on the receiving end) *--------------------------------------------------------------------------*/ HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d); hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); /*-------------------------------------------------------------------------- * initialize communication: send/recv the row nnz * (note the use of comm_pkg_A, mode 12, as in transpose matvec *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1); jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts[0] = 0; B_ext_i_h[0] = 0; hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); for (i = 1; i <= B_ext_nrows; i++) { 
B_ext_i_h[i] += B_ext_i_h[i-1]; } hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz); for (i = 1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i_h[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i_h[i] += B_int_i_h[i-1]; } B_int_nnz = B_int_i_h[B_int_nrows]; B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE); B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_a_d, HYPRE_MEMORY_DEVICE, B_int_a_d ); comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_j_d, HYPRE_MEMORY_DEVICE, B_int_j_d ); hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create CSR: on device */ B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixI(B_int_d) = B_int_i_d; hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d; hypre_CSRMatrixData(B_int_d) = B_int_a_d; hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int_d; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE); hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ExchangeExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int_d = (hypre_CSRMatrix *) request[2]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int_d; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ HYPRE_Int hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data, void **request_ptr) { hypre_assert( 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); /* hypre_assert( hypre_GetActualMemLocation( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == HYPRE_MEMORY_DEVICE ); */ hypre_ParcsrGetExternalRowsDeviceInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, request_ptr); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParCSRMatrixExtractBExtDeviceWait(void *request) { return hypre_ParcsrGetExternalRowsDeviceWait(request); } hypre_CSRMatrix* hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { void *request; hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request); return hypre_ParCSRMatrixExtractBExtDeviceWait(request); } /*--------------------------- *---------------------------*/ typedef thrust::tuple<HYPRE_Int, HYPRE_Int> Tuple; //typedef thrust::tuple<HYPRE_Int, HYPRE_Int, HYPRe_Int> Tuple3; struct FFFC_functor : public thrust::unary_function<Tuple, HYPRE_BigInt> { HYPRE_BigInt CF_first[2]; FFFC_functor(HYPRE_BigInt F_first_, HYPRE_BigInt C_first_) { CF_first[1] = F_first_; CF_first[0] = C_first_; } __host__ __device__ HYPRE_BigInt operator()(const Tuple& t) const { const HYPRE_Int local_idx = thrust::get<0>(t); const HYPRE_Int cf_marker = thrust::get<1>(t); const HYPRE_Int s = cf_marker < 0; const HYPRE_Int m = 1 - 2*s; return m*(local_idx + CF_first[s] + s); } }; template<bool FCOL, typename T> struct FFFC_pred : public thrust::unary_function<Tuple, bool> { HYPRE_Int *row_CF_marker; T *col_CF_marker; FFFC_pred(HYPRE_Int *row_CF_marker_, T *col_CF_marker_) { row_CF_marker = row_CF_marker_; col_CF_marker = col_CF_marker_; } __host__ __device__ bool operator()(const Tuple& t) const { const HYPRE_Int i = thrust::get<0>(t); const HYPRE_Int j = thrust::get<1>(t); if (FCOL) { /* AFF */ return row_CF_marker[i] < 0 && (j == -2 || j >= 0 && col_CF_marker[j] < 0); } else { /* AFC */ return row_CF_marker[i] < 0 && (j >= 0 && col_CF_marker[j] >= 0); } } }; HYPRE_Int hypre_ParCSRMatrixGenerateFFFCDevice( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker_host, HYPRE_BigInt *cpts_starts, hypre_ParCSRMatrix *S, hypre_ParCSRMatrix **AFC_ptr, hypre_ParCSRMatrix **AFF_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); HYPRE_Int num_elem_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); //HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); /* offd part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); //HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); /* SoC */ HYPRE_Int *Soc_diag_j = hypre_ParCSRMatrixSocDiagJ(S); HYPRE_Int *Soc_offd_j = hypre_ParCSRMatrixSocOffdJ(S); /* MPI size and rank*/ HYPRE_Int my_id, num_procs; /* nF and nC */ HYPRE_Int n_local, nF_local, nC_local; HYPRE_BigInt 
*fpts_starts, *row_starts; HYPRE_BigInt n_global, nF_global, nC_global; HYPRE_BigInt F_first, C_first; HYPRE_Int *CF_marker; /* AFF */ HYPRE_Int AFF_diag_nnz, AFF_offd_nnz; HYPRE_Int *AFF_diag_ii, *AFF_diag_i, *AFF_diag_j; HYPRE_Complex *AFF_diag_a; HYPRE_Int *AFF_offd_ii, *AFF_offd_i, *AFF_offd_j; HYPRE_Complex *AFF_offd_a; hypre_ParCSRMatrix *AFF; hypre_CSRMatrix *AFF_diag, *AFF_offd; HYPRE_BigInt *col_map_offd_AFF; HYPRE_Int num_cols_AFF_offd; /* AFC */ HYPRE_Int AFC_diag_nnz, AFC_offd_nnz; HYPRE_Int *AFC_diag_ii, *AFC_diag_i, *AFC_diag_j; HYPRE_Complex *AFC_diag_a; HYPRE_Int *AFC_offd_ii, *AFC_offd_i, *AFC_offd_j; HYPRE_Complex *AFC_offd_a; hypre_ParCSRMatrix *AFC; hypre_CSRMatrix *AFC_diag, *AFC_offd; HYPRE_BigInt *col_map_offd_AFC; HYPRE_Int num_cols_AFC_offd; /* work arrays */ HYPRE_Int *map2FC, *itmp, *A_diag_ii, *A_offd_ii, *tmp_j, *offd_mark; HYPRE_BigInt *send_buf, *recv_buf; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); n_global = hypre_ParCSRMatrixGlobalNumRows(A); n_local = hypre_ParCSRMatrixNumRows(A); row_starts = hypre_ParCSRMatrixRowStarts(A); map2FC = hypre_TAlloc(HYPRE_Int, n_local, HYPRE_MEMORY_DEVICE); itmp = hypre_TAlloc(HYPRE_Int, n_local, HYPRE_MEMORY_DEVICE); recv_buf = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) { nC_global = cpts_starts[1]; } hypre_MPI_Bcast(&nC_global, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); nC_local = (HYPRE_Int) (cpts_starts[1] - cpts_starts[0]); fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); fpts_starts[0] = row_starts[0] - cpts_starts[0]; fpts_starts[1] = row_starts[1] - cpts_starts[1]; F_first = fpts_starts[0]; C_first = cpts_starts[0];
#else
HYPRE_Int i; nC_global = cpts_starts[num_procs]; nC_local = (HYPRE_Int)(cpts_starts[my_id+1] - cpts_starts[my_id]); fpts_starts = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) { fpts_starts[i] = row_starts[i] - cpts_starts[i]; } F_first = fpts_starts[my_id]; C_first = cpts_starts[my_id];
#endif
nF_local = n_local - nC_local; nF_global = n_global - nC_global; CF_marker = hypre_TAlloc(HYPRE_Int, n_local, HYPRE_MEMORY_DEVICE); hypre_TMemcpy( CF_marker, CF_marker_host, HYPRE_Int, n_local, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST ); /* map from F+C to F/C indices */ HYPRE_THRUST_CALL( exclusive_scan, thrust::make_transform_iterator(CF_marker, is_negative<HYPRE_Int>()), thrust::make_transform_iterator(CF_marker + n_local, is_negative<HYPRE_Int>()), map2FC ); /* F */ HYPRE_THRUST_CALL( exclusive_scan, thrust::make_transform_iterator(CF_marker, is_nonnegative<HYPRE_Int>()), thrust::make_transform_iterator(CF_marker + n_local, is_nonnegative<HYPRE_Int>()), itmp ); /* C */ HYPRE_THRUST_CALL( scatter_if, itmp, itmp + n_local, thrust::counting_iterator<HYPRE_Int>(0), thrust::make_transform_iterator(CF_marker, is_nonnegative<HYPRE_Int>()), map2FC ); /* FC combined */ hypre_TFree(itmp, HYPRE_MEMORY_DEVICE); /* send_buf: global F/C indices.
Note F-pts are saved as "-x-1" */ send_buf = hypre_TAlloc(HYPRE_BigInt, num_elem_send, HYPRE_MEMORY_DEVICE); hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); FFFC_functor functor(F_first, C_first); HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + num_elem_send, thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(map2FC, CF_marker)), functor), send_buf ); comm_handle = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg, HYPRE_MEMORY_DEVICE, send_buf, HYPRE_MEMORY_DEVICE, recv_buf); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(send_buf, HYPRE_MEMORY_DEVICE); /* Diag */ thrust::zip_iterator< thrust::tuple<HYPRE_Int*, HYPRE_Int*, HYPRE_Complex*> > new_end; A_diag_ii = hypre_TAlloc(HYPRE_Int, A_diag_nnz, HYPRE_MEMORY_DEVICE); hypreDevice_CsrRowPtrsToIndices_v2(n_local, A_diag_nnz, A_diag_i, A_diag_ii); /* AFF Diag */ FFFC_pred<true, HYPRE_Int> AFF_pred_diag(CF_marker, CF_marker); AFF_diag_nnz = HYPRE_THRUST_CALL( count_if, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j)), thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j)) + A_diag_nnz, AFF_pred_diag ); AFF_diag_ii = hypre_TAlloc(HYPRE_Int, AFF_diag_nnz, HYPRE_MEMORY_DEVICE); AFF_diag_j = hypre_TAlloc(HYPRE_Int, AFF_diag_nnz, HYPRE_MEMORY_DEVICE); AFF_diag_a = hypre_TAlloc(HYPRE_Complex, AFF_diag_nnz, HYPRE_MEMORY_DEVICE); new_end = HYPRE_THRUST_CALL( copy_if, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)) + A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j)), thrust::make_zip_iterator(thrust::make_tuple(AFF_diag_ii, AFF_diag_j, AFF_diag_a)), AFF_pred_diag ); hypre_assert( thrust::get<0>(new_end.get_iterator_tuple()) == AFF_diag_ii + AFF_diag_nnz ); HYPRE_THRUST_CALL ( gather, AFF_diag_j, AFF_diag_j + AFF_diag_nnz, map2FC, AFF_diag_j ); HYPRE_THRUST_CALL ( gather, AFF_diag_ii, AFF_diag_ii + AFF_diag_nnz, map2FC, AFF_diag_ii ); AFF_diag_i = hypreDevice_CsrRowIndicesToPtrs(nF_local, AFF_diag_nnz, AFF_diag_ii); hypre_TFree(AFF_diag_ii, HYPRE_MEMORY_DEVICE); /* AFC Diag */ FFFC_pred<false, HYPRE_Int> AFC_pred_diag(CF_marker, CF_marker); AFC_diag_nnz = HYPRE_THRUST_CALL( count_if, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j)), thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j)) + A_diag_nnz, AFC_pred_diag ); AFC_diag_ii = hypre_TAlloc(HYPRE_Int, AFC_diag_nnz, HYPRE_MEMORY_DEVICE); AFC_diag_j = hypre_TAlloc(HYPRE_Int, AFC_diag_nnz, HYPRE_MEMORY_DEVICE); AFC_diag_a = hypre_TAlloc(HYPRE_Complex, AFC_diag_nnz, HYPRE_MEMORY_DEVICE); new_end = HYPRE_THRUST_CALL( copy_if, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j, A_diag_a)), thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j, A_diag_a)) + A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, Soc_diag_j)), thrust::make_zip_iterator(thrust::make_tuple(AFC_diag_ii, AFC_diag_j, AFC_diag_a)), AFC_pred_diag ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); hypre_assert( thrust::get<0>(new_end.get_iterator_tuple()) == AFC_diag_ii + AFC_diag_nnz ); HYPRE_THRUST_CALL ( gather, AFC_diag_j, AFC_diag_j + AFC_diag_nnz, map2FC, AFC_diag_j ); HYPRE_THRUST_CALL ( gather, AFC_diag_ii, AFC_diag_ii + AFC_diag_nnz, map2FC, AFC_diag_ii ); AFC_diag_i = hypreDevice_CsrRowIndicesToPtrs(nF_local, AFC_diag_nnz, AFC_diag_ii); hypre_TFree(AFC_diag_ii, 
HYPRE_MEMORY_DEVICE); /* Offd */ A_offd_ii = hypre_TAlloc(HYPRE_Int, A_offd_nnz, HYPRE_MEMORY_DEVICE); hypreDevice_CsrRowPtrsToIndices_v2(n_local, A_offd_nnz, A_offd_i, A_offd_ii); /* AFF Offd */ FFFC_pred<true, HYPRE_BigInt> AFF_pred_offd(CF_marker, recv_buf); AFF_offd_nnz = HYPRE_THRUST_CALL( count_if, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j)), thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j)) + A_offd_nnz, AFF_pred_offd ); AFF_offd_ii = hypre_TAlloc(HYPRE_Int, AFF_offd_nnz, HYPRE_MEMORY_DEVICE); AFF_offd_j = hypre_TAlloc(HYPRE_Int, AFF_offd_nnz, HYPRE_MEMORY_DEVICE); AFF_offd_a = hypre_TAlloc(HYPRE_Complex, AFF_offd_nnz, HYPRE_MEMORY_DEVICE); new_end = HYPRE_THRUST_CALL( copy_if, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j, A_offd_a)), thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j, A_offd_a)) + A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j)), thrust::make_zip_iterator(thrust::make_tuple(AFF_offd_ii, AFF_offd_j, AFF_offd_a)), AFF_pred_offd ); hypre_assert( thrust::get<0>(new_end.get_iterator_tuple()) == AFF_offd_ii + AFF_offd_nnz ); HYPRE_THRUST_CALL ( gather, AFF_offd_ii, AFF_offd_ii + AFF_offd_nnz, map2FC, AFF_offd_ii ); AFF_offd_i = hypreDevice_CsrRowIndicesToPtrs(nF_local, AFF_offd_nnz, AFF_offd_ii); hypre_TFree(AFF_offd_ii, HYPRE_MEMORY_DEVICE); /* AFC Offd */ FFFC_pred<false, HYPRE_BigInt> AFC_pred_offd(CF_marker, recv_buf); AFC_offd_nnz = HYPRE_THRUST_CALL( count_if, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j)), thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j)) + A_offd_nnz, AFC_pred_offd ); AFC_offd_ii = hypre_TAlloc(HYPRE_Int, AFC_offd_nnz, HYPRE_MEMORY_DEVICE); AFC_offd_j = hypre_TAlloc(HYPRE_Int, AFC_offd_nnz, HYPRE_MEMORY_DEVICE); AFC_offd_a = hypre_TAlloc(HYPRE_Complex, AFC_offd_nnz, HYPRE_MEMORY_DEVICE); new_end = HYPRE_THRUST_CALL( copy_if, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j, A_offd_a)), thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j, A_offd_a)) + A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, Soc_offd_j)), thrust::make_zip_iterator(thrust::make_tuple(AFC_offd_ii, AFC_offd_j, AFC_offd_a)), AFC_pred_offd ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); hypre_assert( thrust::get<0>(new_end.get_iterator_tuple()) == AFC_offd_ii + AFC_offd_nnz ); HYPRE_THRUST_CALL ( gather, AFC_offd_ii, AFC_offd_ii + AFC_offd_nnz, map2FC, AFC_offd_ii ); AFC_offd_i = hypreDevice_CsrRowIndicesToPtrs(nF_local, AFC_offd_nnz, AFC_offd_ii); hypre_TFree(AFC_offd_ii, HYPRE_MEMORY_DEVICE); hypre_TFree(CF_marker, HYPRE_MEMORY_DEVICE); hypre_TFree(map2FC, HYPRE_MEMORY_DEVICE); /* col_map_offd_AFF */ HYPRE_Int tmp_j_size = hypre_max(hypre_max(AFF_offd_nnz, AFC_offd_nnz), num_cols_A_offd); tmp_j = hypre_TAlloc(HYPRE_Int, tmp_j_size, HYPRE_MEMORY_DEVICE); offd_mark = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE); HYPRE_Int *tmp_end; hypre_TMemcpy(tmp_j, AFF_offd_j, HYPRE_Int, AFF_offd_nnz, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL(sort, tmp_j, tmp_j + AFF_offd_nnz); tmp_end = HYPRE_THRUST_CALL(unique, tmp_j, tmp_j + AFF_offd_nnz); num_cols_AFF_offd = tmp_end - tmp_j; HYPRE_THRUST_CALL(fill_n, offd_mark, num_cols_A_offd, 0); hypreDevice_ScatterConstant(offd_mark, num_cols_AFF_offd, tmp_j, 1); HYPRE_THRUST_CALL(exclusive_scan, offd_mark, offd_mark + num_cols_A_offd, tmp_j); HYPRE_THRUST_CALL(gather, AFF_offd_j, AFF_offd_j + AFF_offd_nnz, 
tmp_j, AFF_offd_j); col_map_offd_AFF = hypre_TAlloc(HYPRE_BigInt, num_cols_AFF_offd, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *big_end = HYPRE_THRUST_CALL( copy_if, thrust::make_transform_iterator(recv_buf, -_1-1), thrust::make_transform_iterator(recv_buf, -_1-1) + num_cols_A_offd, offd_mark, col_map_offd_AFF, thrust::identity<HYPRE_Int>() ); hypre_assert(big_end - col_map_offd_AFF == num_cols_AFF_offd); /* col_map_offd_AFC */ hypre_TMemcpy(tmp_j, AFC_offd_j, HYPRE_Int, AFC_offd_nnz, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL(sort, tmp_j, tmp_j + AFC_offd_nnz); tmp_end = HYPRE_THRUST_CALL(unique, tmp_j, tmp_j + AFC_offd_nnz); num_cols_AFC_offd = tmp_end - tmp_j; HYPRE_THRUST_CALL(fill_n, offd_mark, num_cols_A_offd, 0); hypreDevice_ScatterConstant(offd_mark, num_cols_AFC_offd, tmp_j, 1); HYPRE_THRUST_CALL(exclusive_scan, offd_mark, offd_mark + num_cols_A_offd, tmp_j); HYPRE_THRUST_CALL(gather, AFC_offd_j, AFC_offd_j + AFC_offd_nnz, tmp_j, AFC_offd_j); col_map_offd_AFC = hypre_TAlloc(HYPRE_BigInt, num_cols_AFC_offd, HYPRE_MEMORY_DEVICE); big_end = HYPRE_THRUST_CALL( copy_if, recv_buf, recv_buf + num_cols_A_offd, offd_mark, col_map_offd_AFC, thrust::identity<HYPRE_Int>()); hypre_assert(big_end - col_map_offd_AFC == num_cols_AFC_offd); hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE); hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE); hypre_TFree(recv_buf, HYPRE_MEMORY_DEVICE);
//printf("AFF_diag_nnz %d, AFF_offd_nnz %d, AFC_diag_nnz %d, AFC_offd_nnz %d\n", AFF_diag_nnz, AFF_offd_nnz, AFC_diag_nnz, AFC_offd_nnz);
/* AFF */ AFF = hypre_ParCSRMatrixCreate(comm, nF_global, nF_global, fpts_starts, fpts_starts, num_cols_AFF_offd, AFF_diag_nnz, AFF_offd_nnz); hypre_ParCSRMatrixOwnsRowStarts(AFF) = 1; hypre_ParCSRMatrixOwnsColStarts(AFF) = 0; AFF_diag = hypre_ParCSRMatrixDiag(AFF); hypre_CSRMatrixData(AFF_diag) = AFF_diag_a; hypre_CSRMatrixI(AFF_diag) = AFF_diag_i; hypre_CSRMatrixJ(AFF_diag) = AFF_diag_j; AFF_offd = hypre_ParCSRMatrixOffd(AFF); hypre_CSRMatrixData(AFF_offd) = AFF_offd_a; hypre_CSRMatrixI(AFF_offd) = AFF_offd_i; hypre_CSRMatrixJ(AFF_offd) = AFF_offd_j; hypre_CSRMatrixMemoryLocation(AFF_diag) = HYPRE_MEMORY_DEVICE; hypre_CSRMatrixMemoryLocation(AFF_offd) = HYPRE_MEMORY_DEVICE; hypre_ParCSRMatrixDeviceColMapOffd(AFF) = col_map_offd_AFF; hypre_ParCSRMatrixColMapOffd(AFF) = hypre_TAlloc(HYPRE_BigInt, num_cols_AFF_offd, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AFF), col_map_offd_AFF, HYPRE_BigInt, num_cols_AFF_offd, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixSetNumNonzeros(AFF); hypre_ParCSRMatrixDNumNonzeros(AFF) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(AFF); hypre_MatvecCommPkgCreate(AFF); /* AFC */ AFC = hypre_ParCSRMatrixCreate(comm, nF_global, nC_global, fpts_starts, cpts_starts, num_cols_AFC_offd, AFC_diag_nnz, AFC_offd_nnz); hypre_ParCSRMatrixOwnsRowStarts(AFC) = 0; hypre_ParCSRMatrixOwnsColStarts(AFC) = 0; AFC_diag = hypre_ParCSRMatrixDiag(AFC); hypre_CSRMatrixData(AFC_diag) = AFC_diag_a; hypre_CSRMatrixI(AFC_diag) = AFC_diag_i; hypre_CSRMatrixJ(AFC_diag) = AFC_diag_j; AFC_offd = hypre_ParCSRMatrixOffd(AFC); hypre_CSRMatrixData(AFC_offd) = AFC_offd_a; hypre_CSRMatrixI(AFC_offd) = AFC_offd_i; hypre_CSRMatrixJ(AFC_offd) = AFC_offd_j; hypre_CSRMatrixMemoryLocation(AFC_diag) = HYPRE_MEMORY_DEVICE; hypre_CSRMatrixMemoryLocation(AFC_offd) = HYPRE_MEMORY_DEVICE; hypre_ParCSRMatrixDeviceColMapOffd(AFC) = col_map_offd_AFC; hypre_ParCSRMatrixColMapOffd(AFC) = hypre_TAlloc(HYPRE_BigInt, num_cols_AFC_offd, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AFC),
col_map_offd_AFC, HYPRE_BigInt, num_cols_AFC_offd, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixSetNumNonzeros(AFC); hypre_ParCSRMatrixDNumNonzeros(AFC) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(AFC); hypre_MatvecCommPkgCreate(AFC); *AFC_ptr = AFC; *AFF_ptr = AFF; return hypre_error_flag; } /* return B = [Adiag, Aoffd] */ #if 1 __global__ void hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int nrows, HYPRE_Int diag_ncol, HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a, HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a, HYPRE_Int *cols_offd_map, HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab) { const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1,1>(); if (row >= nrows) { return; } /* lane id inside the warp */ const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>(); HYPRE_Int i, j, k, p, istart, iend, bstart; /* diag part */ if (lane_id < 2) { j = read_only_load(d_diag_i + row + lane_id); } if (lane_id == 0) { k = read_only_load(d_ib + row); } istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0); iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1); bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0); p = bstart - istart; for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE) { d_jb[p+i] = read_only_load(d_diag_j + i); d_ab[p+i] = read_only_load(d_diag_a + i); } /* offd part */ if (lane_id < 2) { j = read_only_load(d_offd_i + row + lane_id); } bstart += iend - istart; istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0); iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1); p = bstart - istart; for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE) { const HYPRE_Int t = read_only_load(d_offd_j + i); d_jb[p+i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol; d_ab[p+i] = read_only_load(d_offd_a + i); } } hypre_CSRMatrix* hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd), hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) ); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1, hypre_CSRMatrixI(B) ); const dim3 bDim = hypre_GetDefaultCUDABlockDimension(); const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), NULL, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); return B; } #else hypre_CSRMatrix* hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = 
hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); hypre_CSRMatrix *B; HYPRE_Int B_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd); HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, A_offd_j, A_offd_j + A_offd_nnz, thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; return B; } #endif /* return B = [Adiag, Aoffd; E] */ #if 1 HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *E_diag, *E_offd, *B; HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A), hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A), &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd); B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E), hypre_ParCSRMatrixNumCols(A) + num_cols_offd, hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) + hypre_CSRMatrixNumNonzeros(E)); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) ); dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), cols_offd_map, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_TFree(cols_offd_map, 
HYPRE_MEMORY_DEVICE); hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1, HYPRE_Int, hypre_CSRMatrixNumRows(E), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1, thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd)), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, thrust::plus<HYPRE_Int>() ); gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim); hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag)); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(E_diag), hypre_CSRMatrixNumCols(E_diag), hypre_CSRMatrixI(E_diag), hypre_CSRMatrixJ(E_diag), hypre_CSRMatrixData(E_diag), hypre_CSRMatrixI(E_offd), hypre_CSRMatrixJ(E_offd), hypre_CSRMatrixData(E_offd), NULL, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_CSRMatrixDestroy(E_diag); hypre_CSRMatrixDestroy(E_offd); *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #else HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); HYPRE_Int *E_i = hypre_CSRMatrixI(E); HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E); HYPRE_Complex *E_a = hypre_CSRMatrixData(E); HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E); HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E); HYPRE_Int E_diag_nnz, E_offd_nnz; hypre_CSRMatrix *B; HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // E hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A, last_col_A, num_cols_offd_A, NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL, &E_offd_nnz, NULL, NULL, NULL, NULL); HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i); hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL, first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A, &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag_nnz, B_ii + A_diag_nnz + A_offd_nnz, B_j + A_diag_nnz + A_offd_nnz, B_a + A_diag_nnz + 
A_offd_nnz, NULL, &E_offd_nnz, B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz, NULL); hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_ii + A_diag_nnz + A_offd_nnz, B_ii + B_nnz, thrust::make_constant_iterator(A_nrows), B_ii + A_diag_nnz + A_offd_nnz, thrust::plus<HYPRE_Int>() ); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( gather, A_offd_j, A_offd_j + A_offd_nnz, cols_offd_map, B_j + A_diag_nnz); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz, B_j + A_diag_nnz + A_offd_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + B_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #endif HYPRE_Int hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int nrows, local_row; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_ParCSRMatrixGetrowactive(mat) = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; #else HYPRE_Int my_id; hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(mat), &my_id); row_end = hypre_ParCSRMatrixRowStarts(mat)[ my_id + 1 ]; row_start = hypre_ParCSRMatrixRowStarts(mat)[ my_id ]; #endif nrows = row_end - row_start; if (row < row_start || row >= row_end) { return(-1); } local_row = row - row_start; /* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */ if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) ) { HYPRE_Int max_row_nnz; HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz); hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); 
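/* size the reusable row buffers from the largest row: the reduce below takes
   the maximum of the device-side row_nnz array, so Rowvalues / Rowindices are
   allocated once with max_row_nnz entries and reused by later GetRow calls */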
max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>()); /* HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows); hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE ); */ hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); } else { HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d); hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(size_d, HYPRE_MEMORY_DEVICE); } if (col_ind || values) { if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL) { hypre_ParCSRMatrixDeviceColMapOffd(mat) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE); hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_ParCSRMatrixColMapOffd(mat), HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST ); } hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL, hypre_ParCSRMatrixFirstColDiag(mat), hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixJ(Aa), hypre_CSRMatrixData(Aa), hypre_CSRMatrixI(Ba) + local_row, hypre_CSRMatrixJ(Ba), hypre_CSRMatrixData(Ba), NULL, hypre_ParCSRMatrixRowindices(mat), hypre_ParCSRMatrixRowvalues(mat) ); } if (col_ind) { *col_ind = hypre_ParCSRMatrixRowindices(mat); } if (values) { *values = hypre_ParCSRMatrixRowvalues(mat); } hypre_SyncCudaComputeStream(hypre_handle()); return hypre_error_flag; } #endif // #if defined(HYPRE_USING_CUDA) /*-------------------------------------------------------------------------- * HYPRE_ParCSRDiagScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA, HYPRE_ParVector Hy, HYPRE_ParVector Hx ) { hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA; hypre_ParVector *y = (hypre_ParVector *) Hy; hypre_ParVector *x = (hypre_ParVector *) Hx; HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y)); HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A)); HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x)); HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, x_data); //hypre_SyncCudaComputeStream(hypre_handle()); #else /* #if defined(HYPRE_USING_CUDA) */ HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_size; i++) { x_data[i] = y_data[i]/A_data[A_i[i]]; } #endif /* #if defined(HYPRE_USING_CUDA) */ return ierr; }
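/* A minimal host-side sketch (illustrative names, not part of the hypre API)
 * of the two CSR index conversions this file leans on: the in-place prefix
 * sums that turn per-row nnz counts into row pointers (cf. the B_int_i_h /
 * B_ext_i_h loops above), and the expansion of row pointers into one row
 * index per nonzero, the CPU analogue of hypreDevice_CsrRowPtrsToIndices_v2
 * and the inverse of hypreDevice_CsrRowIndicesToPtrs. */
static void sketch_rownnz_to_rowptr(int nrows, int *i)
{
   /* on entry: i[0] == 0 and i[1..nrows] hold per-row nnz counts;
      on exit:  i[r] is the offset of row r, i[nrows] the total nnz */
   int r;
   for (r = 1; r <= nrows; r++)
   {
      i[r] += i[r-1];
   }
}

static void sketch_rowptr_to_rowind(int nrows, const int *i, int *ii)
{
   /* expand row pointers to one row index per nonzero: ii[k] = row of entry k */
   int r, k;
   for (r = 0; r < nrows; r++)
   {
      for (k = i[r]; k < i[r+1]; k++)
      {
         ii[k] = r;
      }
   }
}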
coarsen.c
/* Program to combine particles in a gadget-file.
   The number of particles combined depends on the distance from a given center

   icc -lm -openmp -o coarsen coarsen.c libgad.o
   icc -lm -lgad-altix-nopot -openmp -o ../bin/altix/coarsen coarsen.c -DNOPOT
   gcc -fopenmp -lm -lgad coarsen.c -o ~/bin/coarsen
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "libgad.h"
#define PI 3.14159265358979323846
#define GAP 8000 //min distance to borders to ignore periodic boundaries
#define h0 0.72
#define DIM 512
#define MIN(a, b) ((a)<(b)?(a):(b))
#define MAX(a, b) ((a)>(b)?(a):(b))
#define ABS(a) ((a) >= 0 ? (a) : -(a))
#define CMP(a,b) ((a)>(b)?(1):(-1))
#define PB(a,b) ((a)>(b)?(a-b):(a))
#define MOVE(a,b) PB(a+b/2,b)
#define MV(a,b) ((a)+(b)/2)%(b)
//#define SQR(x) (x)*(x)
#define G 6.6742e-11
#define Msun 1.989e30
#define kpc 3.08567758128e19
struct part { fltarr pos; fltarr vel; int id; float mass; }; int cmp_mass(struct part *a, struct part *b) { if (a->mass > b->mass) return 1; else if (a->mass < b->mass) return -1; else return 0; } void usage() { fprintf(stderr,"Coarsen v0.01\n"); fprintf(stderr," -i <input file> -o <outputfile>\n"); fprintf(stderr," -f <Coarse factor> \n"); fprintf(stderr," -cm <centerX centerY centerZ>\n"); fprintf(stderr," -d <min-distance to cm>\n"); fprintf(stderr," -g <initial grid size>\n"); fprintf(stderr," -im <initial mass (default=mass of particle 0)>\n"); fprintf(stderr," -b <particles with mass greater than imass*b will become bulge particles>\n"); fprintf(stderr," -l <linear increase of mass with distance>\n"); fprintf(stderr," -box <minx miny minz maxx maxy maxz>\n"); fprintf(stderr," -all <combine all particles regardless of position>\n"); fprintf(stderr," -max <maximum number of passes>\n"); exit(1); } int main (int argc, char *argv[]) { fltarr *pos, *vel, *posnew, *velnew; float *mass, *massnew; int *id, *idnew; char infile[256], outfile[256]; FILE *outf; struct part *partdata; struct header headin, headout; fltarr cm, pcm, vcm; float cf=100.0, dxgrid=100.0, fdum, keep; double ddum, dist, mindist, masscmb, imass=0.0, hbox, olddxgrid, omit, maxmindist; double a,b,normdist, maxdist, bulge=0.0, omitdist; fltarr min, max; int i,j,k,l, x,y,z, NBOX, iteration=0, go_on=1, cmball=0, maxiter=200, mincnt=0; int numpart, numpartnew, n,m, dum, icnt, idmin; int *friend, *index, cmb, pb[3], cnt, linear=0, box=0; int ****grid, ***gridsz; int debug=0; strcpy( infile,"<none>"); strcpy(outfile,"<none>"); i=1; while (i<argc) { if (!strcmp(argv[i],"-i")) { i++; strcpy(infile,argv[i]); i++; } else if (!strcmp(argv[i],"-o")) { i++; strcpy(outfile,argv[i]); i++; } else if (!strcmp(argv[i],"-f")) { i++; cf=atof(argv[i]); i++; } else if (!strcmp(argv[i],"-d")) { i++; keep=atof(argv[i]); i++; } else if (!strcmp(argv[i],"-max")) { i++; maxiter=atoi(argv[i]); i++; } else if (!strcmp(argv[i],"-cnt")) { i++; mincnt=atoi(argv[i]); i++; } else if (!strcmp(argv[i],"-all")) { i++; cmball=1; } else if (!strcmp(argv[i],"-debug")) { i++; debug=1; } else if (!strcmp(argv[i],"-im")) { i++; imass=atof(argv[i]); i++; } else if (!strcmp(argv[i],"-b")) { i++; bulge=atof(argv[i]); i++; } else if (!strcmp(argv[i],"-g")) { i++; dxgrid=atof(argv[i]); i++; } else if (!strcmp(argv[i],"-l")) { i++; linear=1; } else if (!strcmp(argv[i],"-cm")) { i++; cm[0]=atof(argv[i++]); cm[1]=atof(argv[i++]); cm[2]=atof(argv[i++]); } else if (!strcmp(argv[i],"-box")) { box=1; i++; min[0]=atof(argv[i++]); min[1]=atof(argv[i++]); min[2]=atof(argv[i++]); max[0]=atof(argv[i++]);
max[1]=atof(argv[i++]); max[2]=atof(argv[i++]); } else usage(); } if (!strcmp(infile, outfile)) usage();
#ifdef _OPENMP
#pragma omp parallel private(i)
{ i= omp_get_thread_num();
// printf("this is thread number %d \n", i);
// #pragma omp barrier
if (i==0) { printf ("Number of threads: %d\n", omp_get_num_threads()); } }
#endif
fflush(stdout); numpart=readgadget(infile, &headin, &pos, &vel, &id, &mass); if (0==imass) imass=mass[0]; hbox=headin.boxsize; double BOXSIZE=headin.boxsize; normdist=sqrt(3*SQR(hbox/2.0)); omitdist=keep; if (box) { omitdist=hbox; normdist=0; for (i=0; i<3; i++) { if (min[i]<max[i]) { normdist+=SQR((BOXSIZE-(max[i]-min[i]))/2.0); omitdist= (omitdist>(max[i]-min[i])) ? (max[i]-min[i]) : omitdist; } else { normdist+=SQR((min[i]-max[i])/2.0); ddum=BOXSIZE-(min[i]-max[i]); omitdist= (omitdist>ddum) ? (ddum) : omitdist; } } normdist=sqrt(normdist); omitdist=omitdist/2.0; printf("normdist %g\n", normdist); fflush(stdout); } a=( (2*normdist - keep*cf) / (normdist*SQR(keep) - SQR(normdist)*keep) ); b=( (2-a*keep*keep) / (keep) ); if (linear) { a=(cf-2)/(normdist - keep); b=cf-a*normdist; } printf("Finished reading input file.\n"); fflush(stdout); while (go_on) { iteration++; friend = (int *) calloc(numpart,sizeof(int)); olddxgrid=dxgrid; dxgrid*=1.10; n=ceil(headin.boxsize/dxgrid); if (n<8) { n=8; dxgrid=olddxgrid; } m=ceil(numpart/pow(n,3))*2; printf("Building Grid (dim: %d)\n", n); grid= (int ****) malloc (n*sizeof(int ***)); //allocate 4-dimensional array to store particle indexes
gridsz=(int ***) malloc (n*sizeof(int **)); for (i=0; i < n; i++) { grid[i]= (int ***) malloc (n * sizeof(int **)); gridsz[i]=(int **) malloc (n * sizeof(int *)); } for (i=0; i < n; i++) for (j=0; j < n; j++) { grid[i][j]= (int **) malloc (n * sizeof(int *)); gridsz[i][j]= (int *) malloc (n * sizeof(int )); } for (i=0; i < n; i++) for (j=0; j < n; j++) for (k=0; k < n; k++) { grid[i][j][k]= (int *) malloc (m * sizeof(int)); if (grid[i][j][k]==NULL) { fprintf(stderr,"Grid memory allocation failed!\n"); exit(3); } grid[i][j][k][0]=0; gridsz[i][j][k]=m; } j=0; //Populating the grid
for (x=0; x< numpart; x++) { i=floor(pos[x][0]/dxgrid); j=floor(pos[x][1]/dxgrid); k=floor(pos[x][2]/dxgrid); if (gridsz[i][j][k]<=(grid[i][j][k][0]+2)) { grid[i][j][k]=realloc(grid[i][j][k],sizeof(int)*(gridsz[i][j][k]+m)); gridsz[i][j][k]+=m; } grid[i][j][k][0]++; grid[i][j][k][grid[i][j][k][0]]=x; } dum=0; for (i=0; i < n; i++) for (j=0; j < n; j++) for (k=0; k < n; k++) { dum+=grid[i][j][k][0]; grid[i][j][k]=realloc(grid[i][j][k],sizeof(int)*(grid[i][j][k][0]+1)); } if (dum!=numpart) { fprintf(stderr, "Something went wrong, Grid error!\n"); exit(2); } NBOX=1; printf("\nIteration: %d Gridsize: %g\n", iteration, dxgrid); cnt=0; maxmindist=0;
#pragma omp parallel for firstprivate(y,z, i,j,k,l, omit, index, dist, icnt, idmin, mindist, pcm) reduction (+ : cnt) reduction (max : maxmindist)
for (x=0; x < n; x++) { index=(int *) malloc(numpart*sizeof(int)); for (y=0; y < n; y++) for (z=0; z < n; z++) { if ((iteration > 3) && (!cmball)) { pcm[0]=(x+0.5)*dxgrid; pcm[1]=(y+0.5)*dxgrid; pcm[2]=(z+0.5)*dxgrid; dist=distance(pcm, cm); omit=omitdist+((sqrt(iteration)/2.0)*dxgrid); if (dist<omit) { cnt++; continue; } } icnt=0; for (i=(x-NBOX); i <= (x+NBOX); i++) for (j=(y-NBOX); j <= (y+NBOX); j++) for (k=(z-NBOX); k <= (z+NBOX); k++) { if ((iteration > 3) && (!cmball)) { pcm[0]=(i+0.5)*dxgrid; pcm[1]=(j+0.5)*dxgrid; pcm[2]=(k+0.5)*dxgrid; dist=distance(pcm, cm); if (dist<omit) { continue; } } for (l=1; l <= grid[(i+n)%n][(j+n)%n][(k+n)%n][0]; l++) {
index[icnt++]=grid[(i+n)%n][(j+n)%n][(k+n)%n][l]; } } for (i=1; i<= grid[x][y][z][0]; i++) { j=grid[x][y][z][i]; if (j!=index[0]) { idmin=index[0]; } else { idmin=index[1]; } mindist=distance(pos[j], pos[idmin]); for (l=0; l< icnt; l++) { k=index[l]; if (j!=k) { dist=distance(pos[j],pos[k]); if (dist<mindist) { idmin=k; mindist=dist; } } } friend[j]=idmin; if (mindist>maxmindist) { maxmindist=mindist; } } } if ((x)==0) printf("icnt %d maxmindist %g\n", icnt, maxmindist);fflush(stdout); free(index); } // End of parallel region printf("gridcells omitted %d of %d\n", cnt, n*n*n); printf("Friends found\n");fflush(stdout); dum=0; numpartnew=0; posnew =(fltarr *)malloc(sizeof(fltarr)*numpart); velnew =(fltarr *)malloc(sizeof(fltarr)*numpart); idnew =(int *) malloc(sizeof(int)*numpart); massnew=(float *)malloc(sizeof(float)*numpart); if (massnew == NULL) { fprintf(stderr, "malloc failed!\n"); exit(1); } maxdist=0; for (i=0; i<numpart; i++) { cmb=0; if ((friend[i] >= 0) && (i==friend[friend[i]]) && (i!=friend[i])) { for (j=0; j<3; j++) {pcm[j]=0;vcm[j]=0;pb[j]=0;} for (j=0; j<3; j++) { if (ABS(pos[i][j]-pos[friend[i]][j]) < (hbox/2)) { pcm[j]+=pos[i][j]*mass[i]; pcm[j]+=pos[friend[i]][j]*mass[friend[i]]; } else { pb[j]=1; pcm[j]+=MOVE(pos[i][j], hbox)*mass[i]; pcm[j]+=MOVE(pos[friend[i]][j], hbox)*mass[friend[i]]; } vcm[j]+=vel[i][j]*mass[i]; vcm[j]+=vel[friend[i]][j]*mass[friend[i]]; } masscmb=mass[i]+mass[friend[i]]; for (j=0; j<3; j++) { if (pb[j]) pcm[j]=MOVE(pcm[j]/masscmb, hbox); else pcm[j]=pcm[j]/masscmb; vcm[j]=vcm[j]/masscmb; } if (cmball) cmb=1; else { if (box) dist=distbox(pcm, min, max); else dist=distance(pcm, cm); if (dist>maxdist) maxdist=dist; if (linear) { if ((dist > keep) && ( (a*dist+b) > (masscmb/imass))) cmb=1; } else if ((dist > keep) && ( (a*SQR(dist)+b*dist) > (masscmb/imass))) cmb=1; } } if (cmb) { for (j=0; j<3; j++) { posnew[numpartnew][j]=pcm[j]; velnew[numpartnew][j]=vcm[j]; } massnew[numpartnew]=masscmb; idnew[numpartnew]=id[i]; friend[friend[i]]=-1; friend[i]=-1; numpartnew++; } else if (friend[i]>=0) { for (j=0; j<3; j++) { posnew[numpartnew][j]=pos[i][j]; velnew[numpartnew][j]=vel[i][j]; } massnew[numpartnew]=mass[i]; idnew[numpartnew]=id[i]; numpartnew++; } } printf("# of particles: old %d new %d\n", numpart ,numpartnew); printf("maxdist: %g\n", maxdist); free(friend); for (i=0; i < n; i++) for (j=0; j < n; j++) for (k=0; k < n; k++) { free(grid[i][j][k]); } for (i=0; i < n; i++) for (j=0; j < n; j++) { free(grid[i][j]); free(gridsz[i][j]); } for (i=0; i < n; i++) { free(grid[i]); free(gridsz[i]); } free(grid); free(gridsz); if (numpartnew==numpart) go_on=0; if (numpartnew< mincnt) go_on=0; if (iteration >= maxiter ) go_on=0; if (iteration==200) go_on=0; // printf("%10.2f != %g\n", pos[12][2], posnew[12][2]); // printf("%10.2f != %g\n", vel[13][1], velnew[13][1]); // printf("%10.2f != %g\n", mass[133], massnew[133]); memcpy(pos[0], posnew[0], sizeof(fltarr)*numpartnew); memcpy(vel[0], velnew[0], sizeof(fltarr)*numpartnew); memcpy(id, idnew, sizeof(int)*numpartnew); memcpy(mass, massnew, sizeof(float)*numpartnew); pos = realloc(pos, sizeof(fltarr)*numpartnew); vel = realloc(vel, sizeof(fltarr)*numpartnew); id = realloc(id, sizeof(int)*numpartnew); mass= realloc(mass, sizeof(float)*numpartnew); // printf("%10.2f ?= %g\n", pos[12][2], posnew[12][2]); // printf("%10.2f ?= %g\n", vel[13][1], velnew[13][1]); // printf("%10.2f ?= %g\n", mass[133], massnew[133]); numpart=numpartnew; free(posnew); free(velnew); free(idnew); free(massnew); } posnew =(fltarr 
*)malloc(sizeof(fltarr)*numpart); velnew =(fltarr *)malloc(sizeof(fltarr)*numpart); idnew =(int *) malloc(sizeof(int)*numpart); massnew =(float *)malloc(sizeof(float)*numpart); partdata=(struct part *)malloc(sizeof(struct part)*numpart); headout=headin; for (j=0; j<6; j++) { headout.npart[j]=0; headout.nall[j]=0; } for (i=0; i<numpart; i++) { for (j=0; j<3; j++) { partdata[i].pos[j]=pos[i][j]; partdata[i].vel[j]=vel[i][j]; } partdata[i].id=id[i]; partdata[i].mass=mass[i]; } qsort(&partdata[0], numpart, sizeof(struct part), (int (*)(const void *, const void *))cmp_mass); for (i=0; i<numpart; i++) { for (j=0; j<3; j++) { posnew[i][j] =partdata[i].pos[j]; velnew[i][j] =partdata[i].vel[j]; } massnew[i]=partdata[i].mass; idnew[i] =partdata[i].id;
// if (massnew[i]==imass)
// {
// headout.npart[1]++;
// headout.nall[1]++;
// } else
if ((0==bulge) || (massnew[i] <= (bulge*imass))) { headout.npart[2]++; headout.nall[2]++; } else { headout.npart[3]++; headout.nall[3]++; } } writegadget(outfile, headout, posnew, velnew, idnew, massnew); return 0; }
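/* A minimal sketch (illustrative only, not part of libgad) of the merge rule
 * used above: a pair (i, friend[i]) is combined only when the nearest-
 * neighbour choice is mutual, i.e. i == friend[friend[i]], and the centre of
 * mass is formed in a frame shifted by half a box (the MOVE macro) whenever
 * the partners straddle a periodic boundary. Shown here for one coordinate. */
double move_half_box(double a, double box)
{
    /* shift by half a box and wrap into [0, box); applying it twice is the
       identity, which is why MOVE is used both before and after averaging */
    double s = a + box/2;
    return s > box ? s - box : s;
}

double merge_coord(double x1, double m1, double x2, double m2, double box)
{
    if (ABS(x1 - x2) < box/2)
        return (x1*m1 + x2*m2) / (m1 + m2); /* plain centre of mass */
    /* wrapped case: average in the shifted frame, then shift back */
    return move_half_box((move_half_box(x1, box)*m1 + move_half_box(x2, box)*m2) / (m1 + m2), box);
}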
convolution_2x2_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv2x2s1_weight_fp16_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int num_input, int num_output) { // src = kw-kh-inch-outch // dst = 8b-8a-kw-kh-inch/8a-outch/8b Mat weight_data_r2 = kernel.reshape(4, num_input, num_output); kernel_tm_pack8.create(4, num_input / 8, num_output / 8, (size_t)2 * 64, 64); for (int q = 0; q + 7 < num_output; q += 8) { const Mat k0 = weight_data_r2.channel(q); const Mat k1 = weight_data_r2.channel(q + 1); const Mat k2 = weight_data_r2.channel(q + 2); const Mat k3 = weight_data_r2.channel(q + 3); const Mat k4 = weight_data_r2.channel(q + 4); const Mat k5 = weight_data_r2.channel(q + 5); const Mat k6 = weight_data_r2.channel(q + 6); const Mat k7 = weight_data_r2.channel(q + 7); unsigned short* g00 = kernel_tm_pack8.channel(q / 8); for (int p = 0; p + 7 < num_input; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); 
const float* k74 = k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); for (int k = 0; k < 4; k++) { g00[0] = float32_to_float16(k00[k]); g00[1] = float32_to_float16(k10[k]); g00[2] = float32_to_float16(k20[k]); g00[3] = float32_to_float16(k30[k]); g00[4] = float32_to_float16(k40[k]); g00[5] = float32_to_float16(k50[k]); g00[6] = float32_to_float16(k60[k]); g00[7] = float32_to_float16(k70[k]); g00 += 8; g00[0] = float32_to_float16(k01[k]); g00[1] = float32_to_float16(k11[k]); g00[2] = float32_to_float16(k21[k]); g00[3] = float32_to_float16(k31[k]); g00[4] = float32_to_float16(k41[k]); g00[5] = float32_to_float16(k51[k]); g00[6] = float32_to_float16(k61[k]); g00[7] = float32_to_float16(k71[k]); g00 += 8; g00[0] = float32_to_float16(k02[k]); g00[1] = float32_to_float16(k12[k]); g00[2] = float32_to_float16(k22[k]); g00[3] = float32_to_float16(k32[k]); g00[4] = float32_to_float16(k42[k]); g00[5] = float32_to_float16(k52[k]); g00[6] = float32_to_float16(k62[k]); g00[7] = float32_to_float16(k72[k]); g00 += 8; g00[0] = float32_to_float16(k03[k]); g00[1] = float32_to_float16(k13[k]); g00[2] = float32_to_float16(k23[k]); g00[3] = float32_to_float16(k33[k]); g00[4] = float32_to_float16(k43[k]); g00[5] = float32_to_float16(k53[k]); g00[6] = float32_to_float16(k63[k]); g00[7] = float32_to_float16(k73[k]); g00 += 8; g00[0] = float32_to_float16(k04[k]); g00[1] = float32_to_float16(k14[k]); g00[2] = float32_to_float16(k24[k]); g00[3] = float32_to_float16(k34[k]); g00[4] = float32_to_float16(k44[k]); g00[5] = float32_to_float16(k54[k]); g00[6] = float32_to_float16(k64[k]); g00[7] = float32_to_float16(k74[k]); g00 += 8; g00[0] = float32_to_float16(k05[k]); g00[1] = float32_to_float16(k15[k]); g00[2] = float32_to_float16(k25[k]); g00[3] = float32_to_float16(k35[k]); g00[4] = float32_to_float16(k45[k]); g00[5] = float32_to_float16(k55[k]); g00[6] = float32_to_float16(k65[k]); g00[7] = float32_to_float16(k75[k]); g00 += 8; g00[0] = float32_to_float16(k06[k]); g00[1] = float32_to_float16(k16[k]); g00[2] = float32_to_float16(k26[k]); g00[3] = float32_to_float16(k36[k]); g00[4] = float32_to_float16(k46[k]); g00[5] = float32_to_float16(k56[k]); g00[6] = float32_to_float16(k66[k]); g00[7] = float32_to_float16(k76[k]); g00 += 8; g00[0] = float32_to_float16(k07[k]); g00[1] = float32_to_float16(k17[k]); g00[2] = float32_to_float16(k27[k]); g00[3] = float32_to_float16(k37[k]); g00[4] = float32_to_float16(k47[k]); g00[5] = float32_to_float16(k57[k]); g00[6] = float32_to_float16(k67[k]); g00[7] = float32_to_float16(k77[k]); g00 += 8; } } } } static void conv2x2s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row(q); // const float* kptr = (const float*)kernel + 4 * inch * p * 64; int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8); __m256 _r00 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 1); __m256 _r02 = _mm256_broadcast_ss(r0 + 2); __m256 _r03 = _mm256_broadcast_ss(r0 + 3); __m256 _r04 = _mm256_broadcast_ss(r0 + 4); __m256 _r05 = _mm256_broadcast_ss(r0 + 5); __m256 _r06 = _mm256_broadcast_ss(r0 + 6); __m256 _r07 = _mm256_broadcast_ss(r0 + 7); r0 += 8; __m256 _k00 = loadfp16(kptr); __m256 _k01 = loadfp16(kptr + 8); __m256 _k02 = loadfp16(kptr + 16); __m256 _k03 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0); __m256 _k04 = loadfp16(kptr); __m256 _k05 = loadfp16(kptr + 8); __m256 _k06 = loadfp16(kptr + 16); __m256 _k07 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k05, _r05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k06, _r06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k07, _r07, _sum0); //======================================== _r00 = _mm256_broadcast_ss(r0); _r01 = _mm256_broadcast_ss(r0 + 1); _r02 = _mm256_broadcast_ss(r0 + 2); _r03 = _mm256_broadcast_ss(r0 + 3); _r04 = _mm256_broadcast_ss(r0 + 4); _r05 = _mm256_broadcast_ss(r0 + 5); _r06 = _mm256_broadcast_ss(r0 + 6); _r07 = _mm256_broadcast_ss(r0 + 7); r0 += 8; _sum1 = _mm256_comp_fmadd_ps(_k00, _r00, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k03, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k05, _r05, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k06, _r06, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k07, _r07, _sum1); _k00 = loadfp16(kptr); _k01 = loadfp16(kptr + 8); _k02 = loadfp16(kptr + 16); _k03 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0); _k04 = loadfp16(kptr); _k05 = loadfp16(kptr + 8); _k06 = loadfp16(kptr + 16); _k07 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k05, _r05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k06, _r06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k07, _r07, _sum0); _r00 = _mm256_broadcast_ss(r0); _r01 = _mm256_broadcast_ss(r0 + 1); _r02 = _mm256_broadcast_ss(r0 + 2); _r03 = _mm256_broadcast_ss(r0 + 3); _r04 = _mm256_broadcast_ss(r0 + 4); _r05 = _mm256_broadcast_ss(r0 + 5); _r06 = _mm256_broadcast_ss(r0 + 6); _r07 = _mm256_broadcast_ss(r0 + 7); _sum1 = _mm256_comp_fmadd_ps(_k00, _r00, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k03, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k05, _r05, _sum1); _sum1 = 
_mm256_comp_fmadd_ps(_k06, _r06, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k07, _r07, _sum1); //=============== __m256 _r10 = _mm256_broadcast_ss(r1); __m256 _r11 = _mm256_broadcast_ss(r1 + 1); __m256 _r12 = _mm256_broadcast_ss(r1 + 2); __m256 _r13 = _mm256_broadcast_ss(r1 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 7); __m256 _k10 = loadfp16(kptr); __m256 _k11 = loadfp16(kptr + 8); __m256 _k12 = loadfp16(kptr + 16); __m256 _k13 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0); __m256 _k14 = loadfp16(kptr); __m256 _k15 = loadfp16(kptr + 8); __m256 _k16 = loadfp16(kptr + 16); __m256 _k17 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k15, _r15, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k16, _r16, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k17, _r17, _sum0); //======================================= r1 += 8; _r10 = _mm256_broadcast_ss(r1); _r11 = _mm256_broadcast_ss(r1 + 1); _r12 = _mm256_broadcast_ss(r1 + 2); _r13 = _mm256_broadcast_ss(r1 + 3); _r14 = _mm256_broadcast_ss(r1 + 4); _r15 = _mm256_broadcast_ss(r1 + 5); _r16 = _mm256_broadcast_ss(r1 + 6); _r17 = _mm256_broadcast_ss(r1 + 7); _sum1 = _mm256_comp_fmadd_ps(_k10, _r10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k13, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k14, _r14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k15, _r15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k16, _r16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k17, _r17, _sum1); _k10 = loadfp16(kptr); _k11 = loadfp16(kptr + 8); _k12 = loadfp16(kptr + 16); _k13 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0); _k14 = loadfp16(kptr); _k15 = loadfp16(kptr + 8); _k16 = loadfp16(kptr + 16); _k17 = loadfp16(kptr + 24); _sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k15, _r15, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k16, _r16, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k17, _r17, _sum0); r1 += 8; _r10 = _mm256_broadcast_ss(r1); _r11 = _mm256_broadcast_ss(r1 + 1); _r12 = _mm256_broadcast_ss(r1 + 2); _r13 = _mm256_broadcast_ss(r1 + 3); _r14 = _mm256_broadcast_ss(r1 + 4); _r15 = _mm256_broadcast_ss(r1 + 5); _r16 = _mm256_broadcast_ss(r1 + 6); _r17 = _mm256_broadcast_ss(r1 + 7); _sum1 = _mm256_comp_fmadd_ps(_k10, _r10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k13, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k14, _r14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k15, _r15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k16, _r16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k17, _r17, _sum1); kptr -= 224; _mm256_storeu_ps(outptr0, _sum0); _mm256_storeu_ps(outptr0 + 8, _sum1); outptr0 += 16; } for (; j < outw; j++) { __m256 _sum = _mm256_loadu_ps(outptr0); __m256 _r00 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 1); __m256 _r02 = _mm256_broadcast_ss(r0 + 2); __m256 _r03 = _mm256_broadcast_ss(r0 + 3); __m256 _r04 = _mm256_broadcast_ss(r0 + 4); __m256 _r05 = 
_mm256_broadcast_ss(r0 + 5); __m256 _r06 = _mm256_broadcast_ss(r0 + 6); __m256 _r07 = _mm256_broadcast_ss(r0 + 7); __m256 _k00 = loadfp16(kptr); __m256 _k01 = loadfp16(kptr + 8); __m256 _k02 = loadfp16(kptr + 16); __m256 _k03 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k00, _r00, _sum); _sum = _mm256_comp_fmadd_ps(_k01, _r01, _sum); _sum = _mm256_comp_fmadd_ps(_k02, _r02, _sum); _sum = _mm256_comp_fmadd_ps(_k03, _r03, _sum); __m256 _k04 = loadfp16(kptr); __m256 _k05 = loadfp16(kptr + 8); __m256 _k06 = loadfp16(kptr + 16); __m256 _k07 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); _sum = _mm256_comp_fmadd_ps(_k05, _r05, _sum); _sum = _mm256_comp_fmadd_ps(_k06, _r06, _sum); _sum = _mm256_comp_fmadd_ps(_k07, _r07, _sum); //======================================== r0 += 8; _r00 = _mm256_broadcast_ss(r0); _r01 = _mm256_broadcast_ss(r0 + 1); _r02 = _mm256_broadcast_ss(r0 + 2); _r03 = _mm256_broadcast_ss(r0 + 3); _r04 = _mm256_broadcast_ss(r0 + 4); _r05 = _mm256_broadcast_ss(r0 + 5); _r06 = _mm256_broadcast_ss(r0 + 6); _r07 = _mm256_broadcast_ss(r0 + 7); _k00 = loadfp16(kptr); _k01 = loadfp16(kptr + 8); _k02 = loadfp16(kptr + 16); _k03 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k00, _r00, _sum); _sum = _mm256_comp_fmadd_ps(_k01, _r01, _sum); _sum = _mm256_comp_fmadd_ps(_k02, _r02, _sum); _sum = _mm256_comp_fmadd_ps(_k03, _r03, _sum); _k04 = loadfp16(kptr); _k05 = loadfp16(kptr + 8); _k06 = loadfp16(kptr + 16); _k07 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); _sum = _mm256_comp_fmadd_ps(_k05, _r05, _sum); _sum = _mm256_comp_fmadd_ps(_k06, _r06, _sum); _sum = _mm256_comp_fmadd_ps(_k07, _r07, _sum); //=============== __m256 _r10 = _mm256_broadcast_ss(r1); __m256 _r11 = _mm256_broadcast_ss(r1 + 1); __m256 _r12 = _mm256_broadcast_ss(r1 + 2); __m256 _r13 = _mm256_broadcast_ss(r1 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 7); __m256 _k10 = loadfp16(kptr); __m256 _k11 = loadfp16(kptr + 8); __m256 _k12 = loadfp16(kptr + 16); __m256 _k13 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k10, _r10, _sum); _sum = _mm256_comp_fmadd_ps(_k11, _r11, _sum); _sum = _mm256_comp_fmadd_ps(_k12, _r12, _sum); _sum = _mm256_comp_fmadd_ps(_k13, _r13, _sum); __m256 _k14 = loadfp16(kptr); __m256 _k15 = loadfp16(kptr + 8); __m256 _k16 = loadfp16(kptr + 16); __m256 _k17 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k14, _r14, _sum); _sum = _mm256_comp_fmadd_ps(_k15, _r15, _sum); _sum = _mm256_comp_fmadd_ps(_k16, _r16, _sum); _sum = _mm256_comp_fmadd_ps(_k17, _r17, _sum); //======================================= r1 += 8; _r10 = _mm256_broadcast_ss(r1); _r11 = _mm256_broadcast_ss(r1 + 1); _r12 = _mm256_broadcast_ss(r1 + 2); _r13 = _mm256_broadcast_ss(r1 + 3); _r14 = _mm256_broadcast_ss(r1 + 4); _r15 = _mm256_broadcast_ss(r1 + 5); _r16 = _mm256_broadcast_ss(r1 + 6); _r17 = _mm256_broadcast_ss(r1 + 7); _k10 = loadfp16(kptr); _k11 = loadfp16(kptr + 8); _k12 = loadfp16(kptr + 16); _k13 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k10, _r10, _sum); _sum = _mm256_comp_fmadd_ps(_k11, _r11, _sum); _sum = _mm256_comp_fmadd_ps(_k12, _r12, _sum); _sum = _mm256_comp_fmadd_ps(_k13, _r13, _sum); _k14 = loadfp16(kptr); _k15 = loadfp16(kptr + 8); _k16 = loadfp16(kptr + 16); _k17 = loadfp16(kptr + 24); _sum = _mm256_comp_fmadd_ps(_k14, _r14, _sum); 
_sum = _mm256_comp_fmadd_ps(_k15, _r15, _sum); _sum = _mm256_comp_fmadd_ps(_k16, _r16, _sum); _sum = _mm256_comp_fmadd_ps(_k17, _r17, _sum); kptr -= 224; _mm256_storeu_ps(outptr0, _sum); outptr0 += 8; } r0 += 8; r1 += 8; } } } }
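/*
 * Editor's note on the kernel loops above: `loadfp16` expands 8 half-precision
 * weights to fp32 and `_mm256_comp_fmadd_ps` is a compatibility fused
 * multiply-add; both are project helpers whose definitions are not part of
 * this excerpt. The loops broadcast each input scalar across a register and
 * accumulate it against packed weight vectors (apparently 8 output channels
 * per register). The seven `kptr += 32` advances in each variant total 224
 * elements, which the final `kptr -= 224` rewinds so the same packed weight
 * block is reused at the next output position. Below is a plausible minimal
 * sketch of the two helpers, assuming x86 F16C/FMA support -- an
 * illustration, not the project's actual implementation.
 */
#if 0 /* illustrative only */
#include <immintrin.h>
#include <stdint.h>

/* Load 8 IEEE fp16 values and widen them to a __m256 of fp32 (F16C). */
static inline __m256 loadfp16_sketch(const uint16_t *p)
{
    return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p));
}

/* a*b + c: a single FMA instruction where available, mul+add otherwise. */
static inline __m256 comp_fmadd_sketch(__m256 a, __m256 b, __m256 c)
{
#ifdef __FMA__
    return _mm256_fmadd_ps(a, b, c);
#else
    return _mm256_add_ps(_mm256_mul_ps(a, b), c);
#endif
}
#endif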
ckernels.c
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#define max(a,b) \
   ({ __typeof__ (a) _a = (a); \
      __typeof__ (b) _b = (b); \
      _a > _b ? _a : _b; })

#define min(a,b) \
   ({ __typeof__ (a) _a = (a); \
      __typeof__ (b) _b = (b); \
      _a < _b ? _a : _b; })

/* Parenthesized so that expressions such as sqr(a+b) expand correctly. */
#define sqr(x) ((x)*(x))

#define EPSILON 1E-250

int myprint(const char *msg)
{
    printf("%s", msg);
    return 0;
}

/* Separable 3-D Gaussian convolution of `nvectors` stacked volumes,
 * building the 1-D kernel internally from gamma. */
void my_conv_batch(const double *u, double *result, double gamma,
                   int W, int H, int D, int nvectors)
{
    int N = W*H*D;
    int M = max(max(W, H), D);
    double *kernel1d = (double*)malloc(M*sizeof(double));
    for (int i = 0; i < M; i++)
    {
        // kernel1d[i] = max(EPSILON, exp(-i*i / gamma));
        double t = i/(double)(M-1);
        kernel1d[i] = max(EPSILON, exp(-t*t / gamma));
        // kernel1d[i] = max(EPSILON, exp(-sqr(i/(double)(M-1)) / gamma));
    }
    double *tmp = (double*)malloc(W*H*D*sizeof(double)); // allocated here; otherwise not thread-safe

    // printf("Kernel 1d:\n");
    // for(int i = 0; i < M; ++i) printf("%g, ", kernel1d[i]);

    for (int nv = 0; nv < nvectors; nv++)
    {
        #pragma omp parallel for
        for (int d = 0; d < D; d++)
        {
            for (int i = 0; i < H; i++)
                for (int j = 0; j < W; j++)
                {
                    double conv = 0;
                    for (int k = 0; k < W; k++)
                        conv += kernel1d[abs(j-k)]*u[nv*N + d*W*H + i*W + k];
                    // Stored transposed to speed up reads in the next loop.
                    tmp[d*W*H + j*H + i] = conv;
                }
            for (int j = 0; j < W; j++)
                for (int i = 0; i < H; i++)
                {
                    double conv = 0;
                    for (int k = 0; k < H; k++)
                        conv += kernel1d[abs(i-k)]*tmp[d*W*H + j*H + k];
                    result[nv*N + (i*W+j)*D + d] = conv;
                }
        }
        #pragma omp parallel for
        for (int i = 0; i < H; i++)
            for (int j = 0; j < W; j++)
                for (int d = 0; d < D; d++)
                {
                    double conv = 0;
                    for (int k = 0; k < D; k++)
                        conv += kernel1d[abs(d-k)]*result[nv*N + k + (i*W + j)*D];
                    tmp[d*W*H + i*W + j] = conv;
                }
        memcpy(result + nv*N, tmp, W*H*D*sizeof(double));
    }
    free(tmp);
    free(kernel1d);
}

/* 2-D variant: two 1-D passes with a caller-supplied kernel. */
void convolution_batch_2d(const double *u, const double *kernel1d,
                          double *result, int W, int H, int nvectors)
{
    int N = W*H;
    double *tmp = (double*)malloc(W*H*sizeof(double)); // allocated here; otherwise not thread-safe
    for (int nv = 0; nv < nvectors; nv++)
    {
        #pragma omp parallel for
        for (int i = 0; i < H; i++)
            for (int j = 0; j < W; j++)
            {
                double conv = 0;
                for (int k = 0; k < W; k++)
                    conv += kernel1d[abs(j-k)]*u[nv*N + i*W + k];
                tmp[i + j*H] = conv;
            }
        #pragma omp parallel for
        for (int j = 0; j < W; j++)
            for (int i = 0; i < H; i++)
            {
                double conv = 0;
                for (int k = 0; k < H; k++)
                    conv += kernel1d[abs(i-k)]*tmp[k + j*H];
                result[nv*N + i*W + j] = conv;
            }
    }
    free(tmp);
}

/* 3-D variant with a caller-supplied kernel. */
void convolution_batch(const double *u, const double *kernel1d, double *result,
                       int W, int H, int D, int nvectors)
{
    int N = W*H*D;
    double *tmp = (double*)malloc(W*H*D*sizeof(double)); // allocated here; otherwise not thread-safe

    // printf("\nKernel 1d:\n");
    // int M = max(max(W, H), D);
    // for(int i = 0; i < M; ++i) printf("%g ", kernel1d[i]);
    // printf("\n\nu:");
    // for(int i = 0; i < N; ++i) { if(i%10 == 0) printf("\n"); printf("%g ", u[i]); }
    // printf("\n\nresult:");
    // for(int i = 0; i < N; ++i) { if(i%10 == 0) printf("\n"); printf("%g ", result[i]); }
    // printf("\n");

    for (int nv = 0; nv < nvectors; nv++)
    {
        #pragma omp parallel for
        for (int d = 0; d < D; d++)
        {
            for (int i = 0; i < H; i++)
                for (int j = 0; j < W; j++)
                {
                    double conv = 0;
                    for (int k = 0; k < W; k++)
                        conv += kernel1d[abs(j-k)]*u[nv*N + d*W*H + i*W + k];
                    // Stored transposed to speed up reads in the next loop.
                    tmp[d*W*H + j*H + i] = conv;
                }
            for (int j = 0; j < W; j++)
                for (int i = 0; i < H; i++)
                {
                    double conv = 0;
                    for (int k = 0; k < H; k++)
                        conv += kernel1d[abs(i-k)]*tmp[d*W*H + j*H + k];
                    result[nv*N + (i*W+j)*D + d] = conv;
                }
        }
        #pragma omp parallel for
        for (int i = 0; i < H; i++)
            for (int j = 0; j < W; j++)
                for (int d = 0; d < D; d++)
                {
                    double conv = 0;
                    for (int k = 0; k < D; k++)
                        conv += kernel1d[abs(d-k)]*result[nv*N + k + (i*W + j)*D];
                    tmp[d*W*H + i*W + j] = conv;
                }
        memcpy(result + nv*N, tmp, W*H*D*sizeof(double));
    }
    free(tmp);
}

void convolution(const double *u, const double *kernel1d, double *result,
                 int W, int H, int D)
{
    convolution_batch(u, kernel1d, result, W, H, D, 1);
}

void convolution_batch_build_kernel(const double *u, double *result, double gamma,
                                    int W, int H, int D, int nvectors)
{
    int M = max(max(W, H), D);
    double *kernel1d = (double*)malloc(M*sizeof(double));
    for (int i = 0; i < M; i++)
    {
        // kernel1d[i] = max(EPSILON, exp(-i*i / gamma));
        double t = i/(double)(M-1);
        kernel1d[i] = max(EPSILON, exp(-t*t / gamma));
        // kernel1d[i] = max(EPSILON, exp(-sqr(i/(double)(M-1)) / gamma));
    }
    convolution_batch(u, kernel1d, result, W, H, D, nvectors);
    free(kernel1d);
}

void convolution_build_kernel(const double *u, double *result, double gamma,
                              int W, int H, int D)
{
    convolution_batch_build_kernel(u, result, gamma, W, H, D, 1);
}

int main(int argc, char *argv[])
{
    // Test the convolution with a few impulse vectors.
    int n = 16;
    int N = n*n*n;
    int nv = 3;
    double gamma = 2*sqr(0.05);
    double *u    = (double*)malloc(N*nv*sizeof(double));
    double *res  = (double*)malloc(N*nv*sizeof(double));
    double *res2 = (double*)malloc(N*nv*sizeof(double));
    for (int i = 0; i < N*nv; ++i)
    {
        u[i] = 0.0;
        res[i] = 0.0;
    }
    u[0] = 1.0;
    if (nv >= 2) u[N + n-1] = 1.0;
    if (nv >= 3) u[2*N + n/2] = 1.0;

    // convolution(u, res, gamma, n, n, n);
    convolution_batch_build_kernel(u, res, gamma, n, n, n, nv);

    // Display only the first line of each vector.
    printf("Vector:\n");
    for (int j = 0; j < nv; ++j)
    {
        for (int i = 0; i < n; ++i)
            printf("%g, ", res[j*N + i]);
        printf("\n\n");
    }

    int W = n;
    int H = n;
    int D = n;

    // Test convolution_batch with an externally built kernel.
    int M = max(max(W, H), D);
    double *kernel1d = (double*)malloc(M*sizeof(double));
    for (int i = 0; i < M; i++)
    {
        // kernel1d[i] = max(EPSILON, exp(-i*i / gamma));
        double t = i/(double)(M-1);
        kernel1d[i] = max(EPSILON, exp(-t*t / gamma));
        // kernel1d[i] = max(EPSILON, exp(-sqr(i/(double)(M-1)) / gamma));
    }
    convolution_batch(u, kernel1d, res2, n, n, n, nv);

    // Display only the first line of each vector.
    printf("Vector:\n");
    for (int j = 0; j < nv; ++j)
    {
        for (int i = 0; i < n; ++i)
            printf("%g, ", res2[j*N + i]);
        printf("\n\n");
    }

    // // Display the whole first vector:
    // for(int i = 0; i < N; ++i)
    // {
    //     if(i%n == 0 && i != 0) printf("\n");
    //     if(i%(n*n) == 0 && i != 0) printf("\n");
    //     printf("%g, ", res[i]);
    // }

    free(kernel1d);
    free(u);
    free(res);
    free(res2);
    return 0;
}
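/*
 * Editor's note -- why three 1-D passes suffice: the Gaussian factorizes,
 *
 *   exp(-(dx*dx + dy*dy + dz*dz)/gamma)
 *     = exp(-dx*dx/gamma) * exp(-dy*dy/gamma) * exp(-dz*dz/gamma),
 *
 * so convolving along x, then y, then z with the same 1-D kernel reproduces
 * the dense 3-D Gaussian at O(W+H+D) cost per voxel instead of O(W*H*D).
 * Distances are normalized by (M-1), so with kernel1d[i] = exp(-t*t/gamma),
 * t = i/(M-1), choosing gamma = 2*sqr(sigma) gives standard deviation sigma
 * in those normalized coordinates; the EPSILON floor merely keeps every
 * kernel entry strictly positive. A usage sketch with hypothetical sizes:
 */
#if 0 /* usage sketch */
int n = 32, N = n*n*n, nv = 1;
double sigma = 0.05;                  /* in units of the normalized grid  */
double gamma = 2.0*sqr(sigma);        /* kernel is exp(-t*t/(2*sigma*sigma)) */
double *u   = (double*)calloc(N, sizeof(double));
double *res = (double*)malloc(N*sizeof(double));
u[(n/2)*n*n + (n/2)*n + n/2] = 1.0;   /* unit impulse at the centre voxel */
convolution_batch_build_kernel(u, res, gamma, n, n, n, nv);
/* res now holds an (unnormalized) Gaussian bump centred on the impulse */
free(u); free(res);
#endif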
GB_binop__lor_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_uint64 // A.*B function (eWiseMult): GB_AemultB__lor_uint64 // A*D function (colscale): GB_AxD__lor_uint64 // D*A function (rowscale): GB_DxB__lor_uint64 // C+=B function (dense accum): GB_Cdense_accumB__lor_uint64 // C+=b function (dense accum): GB_Cdense_accumb__lor_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_uint64 // C=scalar+B GB_bind1st__lor_uint64 // C=scalar+B' GB_bind1st_tran__lor_uint64 // C=A+scalar GB_bind2nd__lor_uint64 // C=A'+scalar GB_bind2nd_tran__lor_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_uint64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_uint64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__lor_uint64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_uint64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_uint64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_uint64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
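/*
 * Editor's note (not part of the generated file): every kernel above is a
 * generic GraphBLAS template specialized with
 *
 *     GB_BINOP(z, x, y):  z = ((x != 0) || (y != 0))
 *
 * over uint64_t. Stripped of masks, slicing and threading, the bind1st
 * variant reduces to the loop sketched below.
 */
#if 0 /* illustrative only */
#include <stdint.h>
static void lor_uint64_bind1st_sketch (uint64_t *Cx, uint64_t x,
    const uint64_t *Bx, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = ((x != 0) || (Bx [p] != 0)) ;  /* as in GB_bind1st__lor_uint64 */
    }
}
#endif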
lcm2_profiler.c
/** @file lcm2_profiler.c * * @par Copyright: * 2009-2018 (C) Kai-Uwe Behrmann * * @brief littleCMS CMM profile generator for Oyranos * @internal * @author Kai-Uwe Behrmann <ku.b@gmx.de> * @par License: * MIT <http://www.opensource.org/licenses/MIT> * @since 2009/10/24 */ #include "lcm2_profiler.h" #include <assert.h> #include <lcms2.h> #include <stdarg.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <wchar.h> #ifndef OY_UNUSED #if (__GNUC__*100 + __GNUC_MINOR__) >= 406 #define OY_UNUSED __attribute__ ((unused)) #elif defined(_MSC_VER) #define OY_UNUSED __declspec(unused) #else #define OY_UNUSED #endif #endif #ifndef OY_FALLTHROUGH #if defined(__clang__) #define OY_FALLTHROUGH #elif __GNUC__ >= 7 #define OY_FALLTHROUGH __attribute__ ((fallthrough)); #else #define OY_FALLTHROUGH #endif #endif #if LCMS_VERSION < 2050 /* 'dscm' */ #define cmsSigProfileDescriptionMLTag 0x6473636d #endif #define lcm2Free_m(v) if(v) { free(v); v = NULL; } extern lcm2Message_f lcm2msg_p; static const int max_channels = 16; /* core functions */ typedef struct { cmsHTRANSFORM in2MySpace; cmsHTRANSFORM mySpace2Out; lcm2Sampler_f sampler; void * sampler_variables; int channelsIn; int channelsProcess; int channelsOut; } lcm2Cargo_s; int lcm2samplerDouble ( double in[], double out[], void * Cargo ) { int i; lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo; // color convert from input space to prcess color space if(d->in2MySpace) cmsDoTransform( d->in2MySpace, in, in, 1 ); // custom data processing d->sampler(in,out,d->sampler_variables); // converting from process space to output space if(d->mySpace2Out) cmsDoTransform( d->mySpace2Out, out, out, 1 ); // clipping for(i = 0; i < d->channelsOut; ++i) { if(out[i] > 1.0) out[i] = 1.0; if(out[i] < 0.0) out[i] = 0.0; } return TRUE; } int lcm2sampler16 (const cmsUInt16Number In[], cmsUInt16Number Out[], void * Cargo) { int i, v, result = TRUE; double in[max_channels], out[max_channels], scaler = 65536.0; lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo; for(i = 0; i < d->channelsIn; ++i) in[i] = In[i] / scaler; result = lcm2samplerDouble( in, out, Cargo ); for(i = 0; i < d->channelsOut; ++i) { v = out[i] * scaler; // integer clipping if(v > 65535) Out[i] = 65535; else Out[i] = v; } return result; } int lcm2samplerFloat ( const cmsFloat32Number In[], cmsFloat32Number Out[], void * Cargo ) { int i, result = TRUE; double in[max_channels], out[max_channels]; lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo; for(i = 0; i < d->channelsIn; ++i) in[i] = In[i]; result = lcm2samplerDouble( in, out, Cargo ); for(i = 0; i < d->channelsOut; ++i) Out[i] = out[i]; return result; } /** \addtogroup profiler ICC profiler API * @brief Easy to use API to generate matrix and LUT ICC profiles. * * @{ */ /** Function lcm2OpenProfileFile * @brief Open a profile from file * * @code // create ICC profile with linear gamma, RGB.709 primaries + D65 from wildcard if(in_space_profile) h_in_space = lcm2OpenProfileFile( "*srgblinear", NULL ); @endcode * * @param[in] my_space_profile operating color space. 
* Use a file name or * possible wildcards: * - *srgblinear * - *srgb * - *lab * - *rec601.625.linear * - *rec601.525.linear * @param[in] my_space_profile_path path name for * for my_space_profile; optional * @return lcms profile handle * * @version Oyranos: 0.9.6 * @date 2016/03/04 * @since 2016/03/04 (Oyranos: 0.9.6) */ cmsHPROFILE lcm2OpenProfileFile ( const char * my_space_profile, const char * my_space_profile_path ) { cmsHPROFILE h_my_space = 0; if(my_space_profile_path == NULL) my_space_profile_path = ""; if(my_space_profile && my_space_profile[0]) { char * full_name = (char*) malloc(strlen(my_space_profile_path) + strlen(my_space_profile) + 1); if(!full_name) return NULL; sprintf( full_name, "%s%s", my_space_profile_path, my_space_profile ); if(strcmp(my_space_profile,"*lab") == 0) h_my_space = cmsCreateLab4Profile(cmsD50_xyY()); else if(strcmp(my_space_profile,"*xyz") == 0) h_my_space = cmsCreateXYZProfile( ); else if(strcmp(my_space_profile,"*srgb") == 0) h_my_space = cmsCreate_sRGBProfile( ); else if(strcmp(my_space_profile,"*srgblinear") == 0) h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33, 0.30, 0.60, 0.15, 0.06, 0.3127,0.329 ); else /* ITU-R BT.601-7 625-line, 50 field/s systems */ if(strcmp(my_space_profile,"*rec601.625.linear") == 0) h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33, 0.29, 0.60, 0.15, 0.06, 0.3127,0.329 ); else /* ITU-R BT.601-7 525-line, 60/1.001, field/s systems */ if(strcmp(my_space_profile,"*rec601.525.linear") == 0) h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.63, 0.34, 0.31, 0.595, 0.155, 0.07, 0.3127,0.329 ); if(!h_my_space) h_my_space = cmsOpenProfileFromFile( full_name, "rb" ); if(!h_my_space) { lcm2msg_p( 300, NULL, "no profile from %s", full_name); } /*else printf("will use %s\n", full_name);*/ lcm2Free_m(full_name); } return h_my_space; } /** Function lcm2WriteProfileToFile * @brief Write a profile to a file * * Suggested is a scheme of "space version vendor.icc". * * @code // "My-Space_v1.0_myna.icc" char * file_name = lcm2WriteProfileToFile( my_space_profile, "My-Space", "v1.0", "myna" ); @endcode * * @param[in] my_space_profile the profile * @param[in] my_space_profile_name the color space name * @param[in] my_space_profile_version the version of the profile; optional * @param[in] vendor_four_bytes the vendor, just four bytes; optional * @return constructed file name; * can be released with free() * * @version Oyranos: 0.9.6 * @date 2016/03/06 * @since 2016/02/16 (Oyranos: 0.9.6) */ char * lcm2WriteProfileToFile ( cmsHPROFILE my_space_profile, const char * my_space_profile_name, const char * my_space_profile_version, const char * vendor_four_bytes ) { int i; i = 0; char * fn = (char*) malloc(strlen(my_space_profile_name) + (my_space_profile_version ? strlen(my_space_profile_version):0) + (vendor_four_bytes ? strlen(vendor_four_bytes):0) + 8); if(!fn) return fn; sprintf( fn, "%s%s%s%s%s%s", my_space_profile_name, my_space_profile_version ? " " : "", my_space_profile_version?my_space_profile_version:"", vendor_four_bytes ? " " : "", vendor_four_bytes?vendor_four_bytes:"", strstr(my_space_profile_name, ".icc") ? 
"" : ".icc" ); while(fn[i]) { if(fn[i] == ' ') fn[i] = '_'; ++i; } cmsSaveProfileToFile( my_space_profile, fn ); return fn; } /** Function lcm2WriteProfileToMem * * Save a cmsHPROFILE to a in memory data blob * * @version Oyranos: 0.9.7 * @since 2008/12/28 (Oyranos: 0.9.7) * @date 2017/06/07 */ void * lcm2WriteProfileToMem ( cmsHPROFILE * profile, size_t * size, void * (*allocateFunc)(size_t size) ) { int error = !profile; void * data = 0; cmsUInt32Number size_ = 0; if(!error) { *size = 0; if(!cmsSaveProfileToMem( profile, NULL, &size_ )) lcm2msg_p( 300, NULL, "cmsSaveProfileToMem failed" ); if(size_) { if(allocateFunc) data = allocateFunc( size_ ); else data = malloc( size_ ); cmsSaveProfileToMem( profile, data, &size_ ); } else lcm2msg_p( 300, NULL, "can not convert lcms2 profile to memory" ); *size = size_; } else lcm2msg_p( 301, NULL, "no profle" ); return data; } /* --- CIE*Lab space familiy --- */ /** \addtogroup samplers Samplers * @{ */ static double CIE_C_scaler = M_SQRT2; /* fit all Lab into LCh */ /** Function lcm2SamplerLab2LCh * @brief CIE*Lab -> CIE*LCh in PCS*Lab range * * The CIE*C channel is scaled to contain all CIE*Lab colors. * The ICC PCS*Lab space with range of 0->1 for all channels is utilised to * be useful as a sampler argument to lcm2CreateProfileLutByFunc(). * * @param[in] i input Lab triple * @param[out] o output LCh triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/13 * @since 2016/13/13 (Oyranos: 0.9.6) */ void lcm2SamplerLab2LCh ( const double i[], double o[], void * none OY_UNUSED ) { double a = (i[1] - 0.5) * CIE_C_scaler, b = (i[2] - 0.5) * CIE_C_scaler; /* CIE*L */ o[0] = i[0]; /* CIE*C = sqrt(CIE*a² + CIE*b²) */ o[1] = hypot(a,b); /* CIE*h = atan2(CIE*b, CIE*a) */ o[2] = atan2(b,a)/M_PI/2.0 + 0.5; } /** Function lcm2SamplerLCh2Lab * @brief CIE*LCh -> CIE*Lab in PCS*Lab range * * The CIE*C channel is scaled to contain all CIE*Lab colors. * The ICC PCS*Lab space with range of 0->1 for all channels is utilised to * be useful as a sampler argument to lcm2CreateProfileLutByFunc(). * * @param[in] i input LCh triple * @param[out] o output Lab triple * @param[out] none unused * * @version Oyranos: 0.9.7 * @date 2017/12/05 * @since 2016/13/13 (Oyranos: 0.9.6) */ void lcm2SamplerLCh2Lab ( const double i[], double o[], void * none OY_UNUSED ) { /* CIE*L */ o[0] = i[0]; /* CIE*a = C * cos(h) */ o[1] = 1.0 - (i[1] * cos(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5); /* CIE*b = C * sin(h) */ o[2] = 1.0 - (i[1] * sin(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5); } /* sRGB */ cmsViewingConditions lcm2_vc_srgb_ = { { 95.05, 100.0, 108.88 }, /* D65 white point */ 20, /* viewing background luminance Yb */ 4, /* ambient in cd/m² (== 64 lux) */ 2, /* Dim sourround */ 1 /* adapted (0-1) */ }; /** Function lcm2SamplerJCh2Lab * @brief CIE*LCh -> CIE*Lab in PCS*Lab range * * The CIE*C channel is scaled to contain all CIE*Lab colors. * The ICC PCS*Lab space with range of 0->1 for all channels is utilised to * be useful as a sampler argument to lcm2CreateProfileLutByFunc(). 
* * @param[in] i input LCh triple * @param[out] o output Lab triple * @param[in] v (cmsViewingConditions*); optional, default sRGB * * @version Oyranos: 0.9.7 * @date 2018/02/28 * @since 2018/02/28 (Oyranos: 0.9.7) */ void lcm2SamplerJCh2Lab ( const double i[], double o[], void * v ) { cmsViewingConditions * vc = &lcm2_vc_srgb_; cmsHANDLE vh; cmsCIEXYZ XYZ; cmsJCh JCh = { i[0], i[1], i[2] }; vh = cmsCIECAM02Init( NULL, v?v:vc ); cmsCIECAM02Reverse( vh, &JCh, &XYZ ); cmsCIECAM02Done( vh ); lcm2CIEXYZ2iccLab( &XYZ, o ); } /** Function lcm2SamplerLab2JCh * @brief CIE*Lab -> CIE*JCh * * The CIECAM02 appearance space. * * @param[in] i input Lab triple * @param[out] o output JCh triple * @param[in] v (cmsViewingConditions*); optional, default sRGB * * @version Oyranos: 0.9.7 * @date 2018/02/28 * @since 2018/02/28 (Oyranos: 0.9.7) */ void lcm2SamplerLab2JCh ( const double i[], double o[], void * v ) { cmsViewingConditions * vc = &lcm2_vc_srgb_; cmsHANDLE vh; cmsCIEXYZ XYZ; cmsJCh JCh; lcm2iccLab2CIEXYZ( i, &XYZ ); vh = cmsCIECAM02Init( NULL, v?v:vc ); cmsCIECAM02Forward( vh, &XYZ, &JCh ); cmsCIECAM02Done( vh ); o[0] = JCh.J; o[1] = JCh.C; o[2] = JCh.h; } /* --- YCbCr space familiy --- */ typedef enum { ITU_R_BT_601, ITU_R_BT_601_JPEG, ITU_REC_709, ITU_R_BT_2020 } ITU_Std_e; const char * ITU_Std_dscr [] = { "ITU-R BT.601", "ITU-R BT.601 / JPEG", "ITU REC-709", "ITU-R BT.2020", NULL }; static void selectKbKr( ITU_Std_e ITU_Std, double * Kb, double * Kr ) { switch(ITU_Std) { case ITU_R_BT_601: case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG *Kb = 0.114; *Kr = 0.299; break; case ITU_REC_709: // ITU REC-709 *Kb = 0.0722; *Kr = 0.2126; break; case ITU_R_BT_2020: // ITU-R BT.2020 *Kb = 0.0593; *Kr = 0.2627; break; } } void selectBlackScale( ITU_Std_e ITU_Std, double * black, double * scale ) { switch(ITU_Std) { case ITU_R_BT_601_JPEG: *black = 0; *scale = 255; break; case ITU_R_BT_601: case ITU_REC_709: case ITU_R_BT_2020: *black = 16; *scale = 219; break; } } void linear2ycbcr( double *L_ ) { double L = *L_; double alpha = 1.09929682680944, beta = 0.018053968510807; // linear -> gamma if(L < beta) L *= 4.5; else L = pow(L,0.45) - (alpha - 1); *L_ = L; } void ycbcr2linear( double *V_ ) { double L = *V_; double alpha = 1.09929682680944, beta = 0.081243; /* 0.018053968510807 * 4.5 */ // linear -> gamma if(L < beta) L /= 4.5; else L = pow( (L + (alpha-1)) / alpha, 1.0/0.45 ); *V_ = L; } static void rgb2ycbcr( double R, double G, double B, double *Y_, double *Pb_, double *Pr_, double Kb, double Kr ) { double Y,Pb,Pr; // common RGB -> YCbCr formula Y = Kr * R + (1.0-Kr-Kb) * G + Kb * B; Pb = 1.0/2.0 * (B-Y)/(1.0-Kb); Pr = 1.0/2.0 * (R-Y)/(1.0-Kr); *Y_ = Y; *Pb_ = Pb; *Pr_ = Pr; } static void ycbcr2rgb( double Y, double Pb, double Pr, double *R_, double *G_, double *B_, double Kb, double Kr ) { double R,G,B; // common YCbCr -> RGB formula // Pb = 1.0/2.0 * (B-Y)/(1.0-Kb); // 2*Pb = (B-Y)/(1-Kb) // 2*Pb*(1-Kb) = B-Y // 2*Pb*(1-Kb)+Y = B B = 2*Pb*(1-Kb) + Y; // Pr = 1.0/2.0 * (R-Y)/(1.0-Kr); // 2*Pr*(1-Kr)+Y = R R = 2*Pr*(1-Kr) + Y; // Y = Kr * R + (1.0-Kr-Kb) * G + Kb * B; // Y-(Kr*R)-(Kb*B) = (1-Kb-Kr) * G // (Y-(Kr*R)-(Kb*B))/(1-Kb-Kr) = G G = (Y - Kr*R - Kb*B)/(1.0-Kb-Kr); *R_ = R; *G_ = G; *B_ = B; } static void scaleRGB( ITU_Std_e ITU_Std, double scale, double * R, double * G, double * B ) { switch(ITU_Std) { case ITU_R_BT_601: // ITU-R BT.601 case ITU_REC_709: // ITU REC-709 case ITU_R_BT_2020: // ITU-R BT.2020 case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG *R *= scale; *G *= scale; *B *= scale; break; } 
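  /* Editor's note on the range scalers below: the video standards (BT.601,
     REC-709, BT.2020) use limited range coding -- in 8-bit terms Y in
     [16,235] and Cb/Cr in [16,240] centred on 128 -- while the JPEG flavour
     of BT.601 uses the full [0,255] range. scaleLinearToYCbCr() and
     scaleYCbCrToLinear() apply exactly these offsets, rescaled by max/255
     to the caller's working range. */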
} static void scaleLinearToYCbCr( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr ) { max /= 255.0; switch(ITU_Std) { case ITU_R_BT_601: // ITU-R BT.601 case ITU_REC_709: // ITU REC-709 case ITU_R_BT_2020: // ITU-R BT.2020 *Y *= (235.*max-16.*max); *Y += 16.*max; *Cb *= (240.*max-16.*max); *Cb += 128.*max; *Cr *= (240.*max-16.*max); *Cr += 128.*max; break; case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG *Y *= 255.*max; *Cb *= 255.*max; *Cb += 128.*max; *Cr *= 255.*max; *Cr += 128.*max; break; } } static void scaleYCbCrToLinear( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr ) { max /= 255.0; switch(ITU_Std) { case ITU_R_BT_601: // ITU-R BT.601 case ITU_REC_709: // ITU REC-709 case ITU_R_BT_2020: // ITU-R BT.2020 *Y -= 16.*max; *Y /= (235.*max-16.*max); *Cb -= 128.*max; *Cb /= (240.*max-16.*max); *Cr -= 128.*max; *Cr /= (240.*max-16.*max); break; case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG *Y /= 255.*max; *Cb -= 128.*max; *Cb /= 255.*max; *Cr -= 128.*max; *Cr /= 255.*max; break; } } /** Function lcm2SamplerRGB2JpegYCbCr * @brief RGB -> YCbCr in Jpeg range * * ITU R BT 601 / REC.601 coefficients with Jpeg range of 0-1 is generated. * * @param[in] i input RGB triple * @param[out] o output REC.601 YCbCr in JPEG range triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/13 * @since 2016/03/07 (Oyranos: 0.9.6) */ void lcm2SamplerRGB2JpegYCbCr ( const double i[], double o[], void * none OY_UNUSED ) { /* final space PCS.Lab -> YCbCr */ /** Jpeg assumes no gamma correction. * Thus this sampler converts from RGB. */ ITU_Std_e std = ITU_R_BT_601_JPEG; double Kr,Kb, Y = i[0], Pb = i[1], Pr = i[2], R = i[0], G = i[1], B = i[2]; selectKbKr( std, &Kb, &Kr ); scaleRGB( std, 1.0, &R, &G, &B ); rgb2ycbcr( R, G, B, &Y, &Pb, &Pr, Kb,Kr ); scaleLinearToYCbCr( std, 1.0, &Y, &Pb, &Pr ); o[0] = Y; o[1] = Pb; o[2] = Pr; } /** Function lcm2SamplerRGB2JpegYCbCr * @brief YCbCr in Jpeg range -> RGB * * ITU R BT 601 / REC.601 coefficients in Jpeg range of 0-1 is assumed. * * @param[in] i input REC.601 YCbCr in JPEG range triple * @param[out] o output RGB triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/13 * @since 2016/03/13 (Oyranos: 0.9.6) */ void lcm2SamplerJpegYCbCr2RGB( const double i[], double o[], void * none OY_UNUSED ) { /* final space YCbCr -> PCS.Lab * Jpeg assumes no gamma correction * Thus this sampler converts to RGB * YCbCr -> scale range -> linear YCbCr -> (linear RGB (REC.709) -> Lab) */ ITU_Std_e std = ITU_R_BT_601_JPEG; double Kr,Kb, Y = i[0], Pb = i[1], Pr = i[2], R,G,B; selectKbKr( std, &Kb, &Kr ); scaleYCbCrToLinear( std, 1.0, &Y, &Pb, &Pr ); ycbcr2rgb( Y, Pb, Pr, &R, &G, &B, Kb,Kr ); scaleRGB( std, 1.0, &R, &G, &B ); o[0] = R; o[1] = G; o[2] = B; } /** Function lcm2SamplerIdendity * @brief Lab -> Lab * * PCS Lab range of 0-1 for all channels is assumed. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] none unused * * @version Oyranos: 0.9.7 * @date 2018/02/26 * @since 2018/02/26 (Oyranos: 0.9.7) */ void lcm2SamplerIdendity ( const double i[], double o[], void * none OY_UNUSED ) { o[0] = i[0]; // L / CIE*L / Y / R o[1] = i[1]; // M / CIE*a / Cb / G o[2] = i[2]; // S / CIE*b / Cr / B } /** Function lcm2SamplerGrayer * @brief Lab -> Gray -> Lab * * PCS Lab range of 0-1 for all channels is assumed. 
* * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/13 * @since 2016/03/13 (Oyranos: 0.9.6) */ void lcm2SamplerGrayer ( const double i[], double o[], void * none OY_UNUSED ) { o[0] = i[0]*1.0; // L / CIE*L / Y / R o[1] = 0.5; // M / CIE*a / Cb / G o[2] = 0.5; // S / CIE*b / Cr / B } /** Function lcm2SamplerBlacknWhite * @brief Lab -> Black&White -> Lab * * PCS Lab range of 0-1 for all channels is assumed. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/13 * @since 2016/03/13 (Oyranos: 0.9.6) */ void lcm2SamplerBlacknWhite ( const double i[], double o[], void * none OY_UNUSED ) { if(i[0] <= 0.5) o[0] = 0.0; // L / CIE*L / Y / R else o[0] = 1.0; // L / CIE*L / Y / R o[1] = 0.5; // M / CIE*a / Cb / G o[2] = 0.5; // S / CIE*b / Cr / B } /** Function lcm2SamplerSepia * @brief Lab -> LCh -> Yellow -> LCh -> Lab * * PCS Lab range of 0-1 for all channels is assumed. * Creates a single reddish hue. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/14 * @since 2016/03/14 (Oyranos: 0.9.6) */ void lcm2SamplerSepia ( const double i[], double o[], void * none ) { double in[3],out[3]; lcm2SamplerLab2LCh( i,in,none ); out[0] = in[0]; out[1] = 0.04+0.04*in[0]; out[2] = 0.18; lcm2SamplerLCh2Lab( out,o,none ); } /** Function lcm2SamplerReddish * @brief Lab -> reddish tint -> Lab * * PCS Lab range of 0-1 for all channels is assumed. * Same like Sepia, but gives all colors a reddish tint. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] none unused * * @version Oyranos: 0.9.6 * @date 2016/03/15 * @since 2016/03/15 (Oyranos: 0.9.6) */ void lcm2SamplerReddish ( const double i[], double o[], void * none OY_UNUSED ) { o[0] = i[0]; o[1] = i[1] + 0.012+0.012*i[0]; o[2] = i[2] + 0.025+0.025*i[0]; } /** Function lcm2SamplerWhitePointLab * @brief Lab -> White Point Adaption -> Lab * * PCS Lab range of 0-1 for all channels is assumed. * Same like reddish, but adapts all colors to a given white point difference. * It uses simple linear adaption inside CIE*Lab. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] data pointer to array of two doubles with * desired ICC*ab differences * * @version Oyranos: 0.9.7 * @date 2017/05/17 * @since 2017/05/17 (Oyranos: 0.9.7) */ void lcm2SamplerWhitePointLab( const double i[], double o[], void * data ) { double * icc_ab = (double*) data; o[0] = i[0]; o[1] = i[1] + icc_ab[0] * i[0]; o[2] = i[2] + icc_ab[1] * i[0]; } /** Function lcm2iccLab2CIEXYZ * @brief ICC*Lab -> CIE*XYZ * * Converts from PCS Lab encoding to lcms XYZ type. * * @param[in] icc_Lab input Lab triple in PCS range * @param[out] XYZ output XYZ struct * * @version Oyranos: 0.9.7 * @date 2018/02/28 * @since 2018/02/28 (Oyranos: 0.9.7) */ void lcm2iccLab2CIEXYZ ( const double * icc_Lab, cmsCIEXYZ * XYZ ) { cmsCIELab Lab; Lab.L = icc_Lab[0] * 100.0; Lab.a = icc_Lab[1] * 257.0 - 128.0; Lab.b = icc_Lab[2] * 257.0 - 128.0; cmsLab2XYZ( cmsD50_XYZ(), XYZ, &Lab); } /** Function lcm2CIEXYZ2iccLab * @brief CIE*XYZ -> ICC*Lab * * Converts from lcms XYZ type to PCS Lab encoding. 
* * @param[in] XYZ input XYZ struct * @param[out] icc_Lab output Lab triple in PCS range * * @version Oyranos: 0.9.7 * @date 2018/02/28 * @since 2018/02/28 (Oyranos: 0.9.7) */ void lcm2CIEXYZ2iccLab ( const cmsCIEXYZ * XYZ, double * icc_Lab ) { cmsCIELab Lab; cmsXYZ2Lab( cmsD50_XYZ(), &Lab, XYZ ); icc_Lab[0] = Lab.L / 100.0; icc_Lab[1] = (Lab.a + 128.0) / 257.0; icc_Lab[2] = (Lab.b + 128.0) / 257.0; } /** Function lcm2iccXYZ2iccLab * @brief ICC*XYZ -> ICC*Lab * * Converts from PCS XYZ to PCS Lab encoding. * * @param[in] XYZ input XYZ triple * @param[out] icc_Lab output Lab triple in PCS range * * @version Oyranos: 0.9.7 * @date 2018/02/28 * @since 2018/02/28 (Oyranos: 0.9.7) */ void lcm2iccXYZ2iccLab ( const double * XYZ, double * icc_Lab ) { cmsCIEXYZ XYZ_ = { XYZ[0], XYZ[1], XYZ[2] }; lcm2CIEXYZ2iccLab( &XYZ_, icc_Lab ); } /** Function lcm2SamplerWhitePointBradford * @brief Lab -> Bradford White Point Adaption -> Lab * * PCS Lab range of 0-1 for all channels is assumed. * Same like reddish, but adapts all colors to a given white point difference. * It uses Bradford CAT. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] data pointer to array of two doubles with * source ICC*XYZ white point, followed by * destination ICC*XYZ whitepoint * * @version Oyranos: 0.9.7 * @date 2018/02/28 * @since 2018/02/28 (Oyranos: 0.9.7) */ void lcm2SamplerWhitePointBradford ( const double i[], double o[], void * data ) { double * icc_XYZ = (double*) data; double scale = 100.0; cmsCIEXYZ srcXYZwtpt, iXYZ, oXYZ, dstXYZillu; srcXYZwtpt.X = icc_XYZ[0] * scale; srcXYZwtpt.Y = icc_XYZ[1] * scale; srcXYZwtpt.Z = icc_XYZ[2] * scale; dstXYZillu.X = icc_XYZ[3+0] * scale; dstXYZillu.Y = icc_XYZ[3+1] * scale; dstXYZillu.Z = icc_XYZ[3+2] * scale; lcm2iccLab2CIEXYZ( i, &iXYZ ); cmsAdaptToIlluminant( &oXYZ, &srcXYZwtpt, &dstXYZillu, &iXYZ ); lcm2CIEXYZ2iccLab( &oXYZ, o ); } /** Function lcm2SamplerProof * @brief Lab -> proofing profile -> Lab * * Convert a proofing profile into a abstract one. * Abstract profiles can easily be merged into a multi profile transform. * PCS Lab range of 0-1 for all channels is assumed. * * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] data pointer to array of two void* with * - desired cmsHTRANSFORM * for uint32_t arrays in PT_Lab * - cmsFLAGS_GAMUTCHECK flag * * @version Oyranos: 0.9.7 * @since 2009/11/04 (Oyranos: 0.1.10) * @date 2017/06/03 */ void lcm2SamplerProof ( const double i[], double o[], void * data ) { cmsCIELab Lab1, Lab2; double d; cmsFloat32Number i_[3], o_[3]; void ** ptr = (void**)data; i_[0] = Lab1.L = i[0] * 100.0; i_[1] = Lab1.a = i[1] * 257.0 - 128.0; i_[2] = Lab1.b = i[2] * 257.0 - 128.0; cmsDoTransform( ptr[0], i_, o_, 1 ); Lab2.L = o_[0]; Lab2.a = o_[1]; Lab2.b = o_[2]; d = cmsDeltaE( &Lab1, &Lab2 ); if((fabs(d) > 10) && ptr[1] != NULL) { Lab2.L = 50.0; Lab2.a = Lab2.b = 0.0; } o[0] = Lab2.L/100.0; o[1] = (Lab2.a + 128.0) / 257.0; o[2] = (Lab2.b + 128.0) / 257.0; } /** Function lcm2SamplerProofD * @brief Lab -> proofing profile -> Lab * * Convert a proofing profile into a abstract one. * Abstract profiles can easily be merged into a multi profile transform. * PCS Lab range of 0-1 for all channels is assumed. 
* * @param[in] i input PCS.Lab triple * @param[out] o output PCS.Lab triple * @param[out] data pointer to array of two void* with * - desired cmsHTRANSFORM and * for uint64_t arrays in PT_Lab * - cmsFLAGS_GAMUTCHECK flag * * @version Oyranos: 0.9.7 * @since 2009/11/04 (Oyranos: 0.1.10) * @date 2017/11/06 */ void lcm2SamplerProofD ( const double i[], double o[], void * data ) { cmsCIELab Lab1, Lab2; double d; cmsFloat64Number i_[3], o_[3]; void ** ptr = (void**)data; i_[0] = Lab1.L = i[0] * 100.0; i_[1] = Lab1.a = i[1] * 257.0 - 128.0; i_[2] = Lab1.b = i[2] * 257.0 - 128.0; cmsDoTransform( ptr[0], i_, o_, 1 ); Lab2.L = o_[0]; Lab2.a = o_[1]; Lab2.b = o_[2]; d = cmsDeltaE( &Lab1, &Lab2 ); if((fabs(d) > 10) && ptr[1] != NULL) { Lab2.L = 50.0; Lab2.a = Lab2.b = 0.0; } o[0] = Lab2.L/100.0; o[1] = (Lab2.a + 128.0) / 257.0; o[2] = (Lab2.b + 128.0) / 257.0; } /** @} */ /* samplers */ /** Function lcm2CreateProfileLutByFunc * @brief Generate a ICC profile LUT * * This function takes a series of parameters and functions to create a * ICC profile from. The sampler function operates in a input space and * and creates colors in a output space. These values are filled into the * profile LUT. It is possible to create effect profiles of class abstract * or LUT profiles in any other color space including device links. * * For some already available sampler funtions see @ref samplers. * * @param[in,out] profile profile to add LUT table * @param[in] samplerMySpace the function to fill the LUT with color * @param[in] samplerArg data pointer to samplerMySpace * @param[in] my_space_profile operating color space * for samplerMySpace(); for wildcards see * lcm2OpenProfileFile() * @param[in] in_space_profile input color space * for samplerMySpace(); for wildcards see * lcm2OpenProfileFile() * @param[in] out_space_profile output color space * for samplerMySpace(); for wildcards see * lcm2OpenProfileFile() * @param[in] grid_size dimensions of the created LUT; e.g. 33 * @param[in] tag_sig tag signature for the generated LUT; * * @version Oyranos: 0.9.7 * @date 2017/05/17 * @since 2009/11/04 (Oyranos: 0.1.10) */ int lcm2CreateProfileLutByFunc ( cmsHPROFILE profile, lcm2Sampler_f samplerMySpace, void * samplerArg, const char * in_space_profile, const char * my_space_profile, const char * out_space_profile, int grid_size, cmsTagSignature tag_sig ) { cmsToneCurve * t[max_channels]; int i; int error = 0; if(!profile) return 1; t[0] = cmsBuildGamma(0, 1.0); if(!t[0]) return 1; for(i = 1; i < max_channels; ++i) t[i] = t[0]; error = lcm2CreateProfileLutByFuncAndCurves ( profile, samplerMySpace, samplerArg, t, t, in_space_profile, my_space_profile, out_space_profile, grid_size, tag_sig ); cmsFreeToneCurve( t[0] ); return error; } /** Function lcm2CreateProfileLutByFuncAndCurves * @brief Generate a ICC profile LUT * * This function takes a series of parameters and functions to create a * ICC profile from. The sampler function operates in a input space and * and creates colors in a output space. These values are filled into the * profile LUT. It is possible to create effect profiles of class abstract * or LUT profiles in any other color space including device links. * * For some already available sampler funtions see @ref samplers. 
* * @param[in,out] profile profile to add LUT table * @param[in] samplerMySpace the function to fill the LUT with color * @param[in] samplerArg data pointer to samplerMySpace * @param[in] in_curves input curves * @param[in] out_curves output curves * @param[in] my_space_profile operating color space * for samplerMySpace(); for wildcards see * lcm2OpenProfileFile() * @param[in] in_space_profile input color space * for samplerMySpace(); for wildcards see * lcm2OpenProfileFile() * @param[in] out_space_profile output color space * for samplerMySpace(); for wildcards see * lcm2OpenProfileFile() * @param[in] grid_size dimensions of the created LUT; e.g. 33 * @param[in] tag_sig tag signature for the generated LUT; * * @version Oyranos: 0.9.6 * @date 2017/05/17 * @since 2009/11/04 (Oyranos: 0.1.10) */ int lcm2CreateProfileLutByFuncAndCurves ( cmsHPROFILE profile, lcm2Sampler_f samplerMySpace, void * samplerArg, cmsToneCurve * in_curves[], cmsToneCurve * out_curves[], const char * in_space_profile, const char * my_space_profile, const char * out_space_profile, int grid_size, cmsTagSignature tag_sig ) { cmsHPROFILE h_in_space = 0, h_my_space = 0, h_out_space = 0; cmsHTRANSFORM tr_In2MySpace = 0, tr_MySpace2Out = 0; cmsStage * gmt_lut = 0, * gmt_lut16 = 0; cmsPipeline * gmt_pl = cmsPipelineAlloc( 0,3,3 ), * gmt_pl16 = cmsPipelineAlloc( 0,3,3 ); lcm2Cargo_s cargo; int i; int error = 0; int in_layout, my_layout, out_layout; in_layout = my_layout = out_layout = (FLOAT_SH(1)|CHANNELS_SH(3)|BYTES_SH(0)); if(!profile) return 1; if(in_space_profile) h_in_space = lcm2OpenProfileFile( in_space_profile, NULL ); if(my_space_profile) h_my_space = lcm2OpenProfileFile( my_space_profile, NULL ); if(out_space_profile)h_out_space = lcm2OpenProfileFile( out_space_profile, NULL ); if(h_in_space && h_my_space && strcmp(in_space_profile,my_space_profile) != 0) { tr_In2MySpace = cmsCreateTransformTHR ( 0, h_in_space, in_layout, h_my_space, my_layout, INTENT_RELATIVE_COLORIMETRIC, cmsFLAGS_NOOPTIMIZE); if(!tr_In2MySpace) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; } } if(h_my_space && h_out_space && strcmp(my_space_profile,out_space_profile) != 0) { tr_MySpace2Out = cmsCreateTransformTHR( 0, h_my_space, my_layout, h_out_space, out_layout, INTENT_RELATIVE_COLORIMETRIC, cmsFLAGS_NOOPTIMIZE); if(!tr_MySpace2Out) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; } } memset(&cargo, 0, sizeof(lcm2Cargo_s)); cargo.in2MySpace = tr_In2MySpace; cargo.mySpace2Out = tr_MySpace2Out; cargo.sampler = samplerMySpace; cargo.sampler_variables = samplerArg, cargo.channelsIn = h_in_space ? cmsChannelsOf( cmsGetColorSpace( h_in_space ) ) : 3; cargo.channelsProcess = h_my_space ? cmsChannelsOf( cmsGetColorSpace( h_my_space ) ) : 3; cargo.channelsOut = h_out_space ? 
cmsChannelsOf( cmsGetColorSpace( h_out_space ) ) : 3; #pragma omp parallel for for(i = 0; i < 2; ++i) { if(i) { gmt_lut16 = cmsStageAllocCLut16bit( 0, grid_size, 3,3,0 ); cmsStageSampleCLut16bit( gmt_lut16, lcm2sampler16, &cargo, 0 ); } else { gmt_lut = cmsStageAllocCLutFloat( 0, grid_size, 3,3,0 ); cmsStageSampleCLutFloat( gmt_lut, lcm2samplerFloat, &cargo, 0 ); } } /* 16-bit int */ cmsPipelineInsertStage( gmt_pl16, cmsAT_BEGIN, cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) ); cmsPipelineInsertStage( gmt_pl16, cmsAT_END, gmt_lut16 ); cmsPipelineInsertStage( gmt_pl16, cmsAT_END, cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) ); cmsWriteTag( profile, (tag_sig!=0)?tag_sig:cmsSigAToB0Tag, gmt_pl16 ); /* float */ /* cmsPipeline owns the cmsStage memory */ cmsPipelineInsertStage( gmt_pl, cmsAT_BEGIN, cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) ); cmsPipelineInsertStage( gmt_pl, cmsAT_END, gmt_lut ); cmsPipelineInsertStage( gmt_pl, cmsAT_END, cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) ); //cmsWriteTag( gmt, cmsSigDToB0Tag, gmt_pl ); lcm2CreateProfileLutByFuncAndCurvesClean: if(h_in_space) {cmsCloseProfile( h_in_space );} h_in_space = 0; if(h_my_space) {cmsCloseProfile( h_my_space );} h_my_space = 0; if(h_out_space) {cmsCloseProfile( h_out_space );} h_out_space = 0; if(tr_In2MySpace) {cmsDeleteTransform( tr_In2MySpace );} tr_In2MySpace = 0; if(tr_MySpace2Out) {cmsDeleteTransform( tr_MySpace2Out );} tr_MySpace2Out = 0; if(gmt_pl16) cmsPipelineFree( gmt_pl16 ); if(gmt_pl) cmsPipelineFree( gmt_pl ); return error; } /** Function lcm2CreateAbstractProfile * @brief Create a effect profile of type abstract in ICC*Lab PCS * * Here a code example: * @code void samplerGrayer (const double i[], double o[]) { o[0] = i[0]*1.0; // L / CIE*L / Y / R o[1] = 0.5; // M / CIE*a / Cb / G o[2] = 0.5; // S / CIE*b / Cr / B } const char * name_i18n[] = { "de", "DE", "Graustufen (MyProject)", "en", "US", "Grayer (MyProject)" }; lcm2CreateAbstractProfile ( samplerGrayer, NULL, "*lab", // CIE*Lab 5, 2.3, "Grayer (MyProject)", name_i18n, "Grayer myna", "My Project 2016", "My Name", ICC_2011_LICENSE, "CIE*L", "http://www.cie.co.at", NULL, NULL ); @endcode * * @param[in] samplerMySpace the function to fill the LUT with color * @param[in] samplerArg data pointer to samplerMySpace * @param[in] my_space_profile operating color space * for samplerMySpace(); * "*lab" will set CIE*Lab * @param[in] grid_size dimensions of the created LUT; e.g. 33 * @param[in] icc_profile_version 2.3 or 4.3 * @param[in] my_abstract_description internal profile name * @param[in] my_abstract_descriptions internal profile name translated * @param[in] my_abstract_file_name profile file name. If present a ICC profile will be written to that name. optional * @param[in] provider e.g. "My Project 2016" * @param[in] vendor e.g. "My Name" * @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile." * - first %%s is replaced by the provider string arg and * - second %%s is replaced by the vendor string arg * @param[in] device_model e.g. "My Set" * @param[in] device_manufacturer e.g. "www.mydomain.net" * @param[in] my_meta_data e.g. 
{"DOMAIN_,GROUP_","DOMAIN_key1","value1","GROUP_key2","value2"} * @param[out] h_profile the resulting profile * * @version Oyranos: 0.9.7 * @date 2017/05/17 * @since 2009/11/04 (Oyranos: 0.1.10) */ int lcm2CreateAbstractProfile( lcm2Sampler_f samplerMySpace, void * samplerArg, const char * my_space_profile, int grid_size, double icc_profile_version, const char * my_abstract_description, const char ** my_abstract_descriptions, const char * my_abstract_file_name, const char * provider, const char * vendor, const char * my_license, const char * device_model, const char * device_manufacturer, const char ** my_meta_data, cmsHPROFILE * h_profile ) { cmsHPROFILE profile = 0; int error = 0; profile = lcm2CreateProfileFragment ( "*lab", // CIE*Lab "*lab", // CIE*Lab icc_profile_version, my_abstract_description, provider, vendor, my_license, device_model, device_manufacturer, NULL); if(!profile) goto lcm2CreateAbstractProfileClean; if(my_meta_data) lcm2AddMetaTexts ( profile, my_meta_data[0], &my_meta_data[1], cmsSigMetaTag ); error = lcm2CreateProfileLutByFunc( profile, samplerMySpace, samplerArg, "*lab", my_space_profile, "*lab", grid_size, cmsSigAToB0Tag ); if(error) goto lcm2CreateAbstractProfileClean; lcm2AddMluDescription ( profile, my_abstract_descriptions, cmsSigProfileDescriptionMLTag ); if(my_abstract_file_name) { char * fn = lcm2WriteProfileToFile( profile, my_abstract_file_name, 0,0 ); lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----"); lcm2Free_m(fn); } if(h_profile) *h_profile = profile; else cmsCloseProfile( profile ); lcm2CreateAbstractProfileClean: return error; } /** Function lcm2CreateAbstractTemperatureProfile * @brief Create a effect profile of type abstract in ICC*Lab PCS from Kelvin * * @param[in] kelvin the desired temperature in Kelvin; ICC reference (D50) is 5000 Kelvin * @param[in] source_white_profile a profile, e.g. the actual monitor profile; optional, default is D50 * @param[in] grid_size dimensions of the created LUT; e.g. 33 * @param[in] icc_profile_version 2.3 or 4.3 * @param[out] my_abstract_file_name profile file name * @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name. 
* * @version Oyranos: 0.9.7 * @date 2017/05/17 * @since 2017/05/17 (Oyranos: 0.9.7) */ int lcm2CreateAbstractTemperatureProfile ( float kelvin, cmsHPROFILE source_white_profile, int grid_size, double icc_profile_version, char ** my_abstract_file_name, cmsHPROFILE * h_profile ) { cmsHPROFILE profile = NULL; cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL}; /* type[6] Y = (a * X + b) ^ Gamma + c order: {g, a, b, c} */ double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0}; int i; cmsCIEXYZ * source_white = NULL; const char * kelvin_meta[] = { "EFFECT_class", "reddish,white_point,atom", "EFFECT_type", "CIEab", "COLORIMETRY_white_point", "yes,reddish,kelvin", "CMF_binary", "create-abstract", "CMF_version", "0.9.7", "CMF_product", "Oyranos", 0,0 }; char * kelvin_name = malloc(1024); int error = !kelvin_name; double icc_ab[2]; char * desc = NULL; if(error) return 1; if(source_white_profile) { if(cmsIsTag(source_white_profile, cmsSigProfileDescriptionTag)) { cmsUInt32Number n = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, NULL, 0); if(n) { desc = calloc( n+1, sizeof(char) ); if(!desc) goto lcm2CreateAbstractTemperatureProfileClean; cmsUInt32Number nr = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, desc, n); if(n != nr) lcm2msg_p( 301, NULL, "found propblem reading desc tag: %d %d", n,nr); } } source_white = cmsReadTag( source_white_profile, cmsSigMediaWhitePointTag ); // MediaWhitePointTag } i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0); if(!i_curve[0]) error = 1; for(i = 1; i < 3; ++i) { i_curve[i] = i_curve[0]; } if(!error) { cmsCIExyY xyWhitePoint; cmsFloat64Number TempK = kelvin; /* 4000 - 25000 K */ cmsWhitePointFromTemp( &xyWhitePoint, TempK ); cmsCIEXYZ WhitePoint; const cmsCIEXYZ * reference_white = cmsD50_XYZ(); float max_brightness; cmsxyY2XYZ( &WhitePoint, &xyWhitePoint ); cmsCIELab LabWhitePoint; cmsCIELab SrcLabWhitePoint; if(source_white) reference_white = source_white; cmsXYZ2Lab( reference_white, &LabWhitePoint, &WhitePoint ); icc_ab[0] = LabWhitePoint.a/128.0; icc_ab[1] = LabWhitePoint.b/128.0; #ifndef OY_HYP #define OY_SQRT(a,b) ((a)*(a) + (b)*(b)) #define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0) #endif /* reduce brightness remaining inside a cone with a roof angle of 30° */ max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5); cmsXYZ2Lab( cmsD50_XYZ(), &SrcLabWhitePoint, reference_white ); cmsXYZ2Lab( cmsD50_XYZ(), &LabWhitePoint, &WhitePoint ); lcm2msg_p( 302, NULL, "SrcW: %g %g %g LabW: %g %g %g diff: %g %g max brightness: %g", SrcLabWhitePoint.L, SrcLabWhitePoint.a, SrcLabWhitePoint.b, LabWhitePoint.L, LabWhitePoint.a, LabWhitePoint.b, icc_ab[0], icc_ab[1], max_brightness ); /* avoid color clipping around the white point */ curve_params_low[1] = max_brightness; o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low); o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params); if(!o_curve[0] || !o_curve[1]) error = 1; } if(error) goto lcm2CreateAbstractTemperatureProfileClean; if(icc_ab[1] > 0) { sprintf( kelvin_name, "Reddish %d K (www.oyranos.org)", (int)kelvin ); } else if(icc_ab[1] == 0) { sprintf( kelvin_name, "%d K (www.oyranos.org)", (int)kelvin ); kelvin_meta[1] = "neutral,white_point,atom"; kelvin_meta[3] = "yes,D50,kelvin"; } else { sprintf( kelvin_name, "Bluish %d K (www.oyranos.org)", (int)kelvin ); kelvin_meta[1] = "bluish,white_point,atom"; kelvin_meta[3] = "yes,bluish,kelvin"; } 
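  /* Editor's note: when a source white point profile was supplied, the block
     below appends that profile's description to the generated name and
     re-classifies the meta data as a device dependent effect
     ("...,atom,device"); the reddish / neutral / bluish choice again follows
     the sign of the CIE*b difference icc_ab[1]. */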
if(source_white_profile) { if(desc && strlen(desc) < 900) sprintf( &kelvin_name[strlen(kelvin_name)], " - %s", desc); if(icc_ab[1] > 0) { kelvin_meta[1] = "reddish,white_point,atom,device"; kelvin_meta[3] = "yes,reddish,kelvin"; } else if(icc_ab[1] == 0) { kelvin_meta[1] = "neutral,white_point,atom,device"; kelvin_meta[3] = "yes,D50,kelvin"; } else { kelvin_meta[1] = "bluish,white_point,atom,device"; kelvin_meta[3] = "yes,bluish,kelvin"; } } if(!error) /* profile fragment creation */ profile = lcm2CreateProfileFragment ( "*lab", // CIE*Lab "*lab", // CIE*Lab icc_profile_version, kelvin_name, "Oyranos project 2017", "Kai-Uwe Behrmann", ICC_2011_LICENSE, "CIE*Lab", "http://www.cie.co.at", NULL); if(!profile) error = 1; if(!error) error = lcm2CreateProfileLutByFuncAndCurves( profile, lcm2SamplerWhitePointLab, icc_ab, o_curve, i_curve, "*lab", "*lab", "*lab", grid_size, cmsSigAToB0Tag ); if(!error) lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag ); lcm2CreateAbstractTemperatureProfileClean: if(i_curve[0]) cmsFreeToneCurve( i_curve[0] ); if(o_curve[0]) cmsFreeToneCurve( o_curve[0] ); if(o_curve[1]) cmsFreeToneCurve( o_curve[1] ); *my_abstract_file_name = kelvin_name; if(h_profile) *h_profile = profile; else if(profile && *my_abstract_file_name) { char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 ); lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----"); lcm2Free_m(fn); cmsCloseProfile( profile ); } return error; } /** Function lcm2CreateAbstractWhitePointProfileLab * @brief Create a effect profile of type abstract in ICC*Lab PCS for white point adjustment * * These profiles can be applied to 1D / per single channel only adjustments. * It will be marked with EFFECT_linear=yes in the meta tag. * * @param[in] cie_a CIE*a correction value in -0.5 - 0.5 range * @param[in] cie_b CIE*b correction value in -0.5 - 0.5 range * @param[in] grid_size dimensions of the created LUT; e.g. 33 * @param[in] icc_profile_version 2.3 or 4.3 * @param[out] my_abstract_file_name profile file name * @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name. 
 *
 * @version Oyranos: 0.9.7
 * @date    2018/02/28
 * @since   2017/06/02 (Oyranos: 0.9.7)
 */
int lcm2CreateAbstractWhitePointProfileLab (
                                       double            cie_a,
                                       double            cie_b,
                                       int               grid_size,
                                       double            icc_profile_version,
                                       char           ** my_abstract_file_name,
                                       cmsHPROFILE     * h_profile )
{
  cmsHPROFILE profile = NULL;
  cmsToneCurve * i_curve[3] = {NULL,NULL,NULL},
               * o_curve[3] = {NULL,NULL,NULL};
  /* type[6] Y = (a * X + b) ^ Gamma + c   order: {g, a, b, c} */
  double curve_params[4] = {1,1,0,0},
         curve_params_low[4] = {1,0.95,0,0};
  int i;
  const char * kelvin_meta[] = {
    "EFFECT_class", "reddish,white_point,linear,atom",
    "EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
    "EFFECT_type", "CIEab",
    "COLORIMETRY_white_point", "yes,reddish,kelvin",
    "CMF_binary", "create-abstract",
    "CMF_version", "0.9.7",
    "CMF_product", "Oyranos",
    0,0
  };
  char * kelvin_name = malloc(1024);
  int error = !kelvin_name;
  double icc_ab[2] = {cie_a, cie_b};

  if(error) return 1;

  i_curve[0] = cmsBuildGamma(0, 1.0);
  if(!i_curve[0]) error = 1;
  for(i = 1; i < 3; ++i)
    i_curve[i] = i_curve[0];

  if(!error)
  {
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b)  pow(OY_SQRT(a,b),1.0/2.0)
#endif
    /* reduce brightness remaining inside a cone with a roof angle of 30° */
    double max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
    /* avoid color clipping around the white point */
    curve_params_low[1] = max_brightness;
    o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
    o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
    if(!o_curve[0] || !o_curve[1]) error = 1;
  }

  if(error) goto lcm2CreateAbstractWhitePointProfileClean;

  if(icc_ab[1] > 0)
  {
    sprintf( kelvin_name, "Reddish CIE*a %g CIE*b %g", cie_a, cie_b );
  }
  else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001)
  {
    sprintf( kelvin_name, "CIE*a %g CIE*b %g", cie_a, cie_b );
    kelvin_meta[1] = "neutral,white_point,atom";
    kelvin_meta[3] = "yes,D50,kelvin";
  }
  else
  {
    sprintf( kelvin_name, "Bluish CIE*a %g CIE*b %g", cie_a, cie_b );
    kelvin_meta[1] = "bluish,white_point,atom";
    kelvin_meta[3] = "yes,bluish,kelvin";
  }

  profile = lcm2CreateProfileFragment (
                                       "*lab", // CIE*Lab
                                       "*lab", // CIE*Lab
                                       icc_profile_version, kelvin_name,
                                       "Oyranos project 2018",
                                       "Kai-Uwe Behrmann",
                                       ICC_2011_LICENSE,
                                       "CIE*Lab", "http://www.cie.co.at",
                                       NULL );
  if(!profile) goto lcm2CreateAbstractWhitePointProfileClean;

  error = lcm2CreateProfileLutByFuncAndCurves( profile,
                                       lcm2SamplerWhitePointLab, icc_ab,
                                       o_curve, i_curve,
                                       "*lab", "*lab", "*lab",
                                       grid_size, cmsSigAToB0Tag );
  if(!error)
    lcm2AddMetaTexts( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta,
                      cmsSigMetaTag );

lcm2CreateAbstractWhitePointProfileClean:
  if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
  if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
  if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
  *my_abstract_file_name = kelvin_name;
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----" );
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }

  return error;
}

/** Function  lcm2CreateAbstractWhitePointProfileBradford
 *  @brief    Create an effect profile of type abstract in ICC*Lab PCS for
 *            white point adjustment
 *
 *  These profiles can be applied to 1D / per-single-channel adjustments only.
 *  It will be marked with EFFECT_linear=yes in the meta tag.
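 *
 * Here is a small usage sketch; the XYZ values below are only illustrative
 * assumptions (a D50 source white and a slightly warmer illuminant):
 * @code
    double src_XYZ[3]  = { 0.9642, 1.0000, 0.8249 };
    double illu_XYZ[3] = { 0.9800, 1.0000, 0.7500 };
    char * file_name = NULL;
    cmsHPROFILE profile = NULL;
    int error = lcm2CreateAbstractWhitePointProfileBradford( src_XYZ, illu_XYZ,
                                       33, 2.3, 0, &file_name, &profile );
    @endcode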
* * @param[in] src_iccXYZ source media white point * @param[in] illu_iccXYZ ICC*XYZ illuminant in 0.0 - 2.0 range * @param[in] grid_size dimensions of the created LUT; e.g. 33 * @param[in] icc_profile_version 2.3 or 4.3 * @param[in] flags - 0x01 : return only fast my_abstract_file_name, without expensive profile computation * @param[out] my_abstract_file_name profile file name * @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name. * * @version Oyranos: 0.9.7 * @date 2018/07/25 * @since 2017/06/02 (Oyranos: 0.9.7) */ int lcm2CreateAbstractWhitePointProfileBradford ( double * src_iccXYZ, double * illu_iccXYZ, int grid_size, double icc_profile_version, int flags, char ** my_abstract_file_name, cmsHPROFILE * h_profile ) { cmsHPROFILE profile = NULL; cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL}; /* type[6] Y = (a * X + b) ^ Gamma + c order: {g, a, b, c} */ double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0}; int i; const char * kelvin_meta[] = { "EFFECT_class", "reddish,type,white_point,linear,atom", "EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */ "COLORIMETRY_white_point", "yes,reddish,kelvin", "EFFECT_type", "bradford", "CMF_binary", "create-abstract", "CMF_version", "0.9.7", "CMF_product", "Oyranos", 0,0 }; char * kelvin_name = malloc(1024); int error = !kelvin_name; double icc_XYZ[6] = { src_iccXYZ[0], src_iccXYZ[1], src_iccXYZ[2], illu_iccXYZ[0], illu_iccXYZ[1], illu_iccXYZ[2]}; double icc_ab[2] = {0,0}; if(error) return 1; if(!(flags & 0x01)) /* skip computation */ { i_curve[0] = cmsBuildGamma(0, 1.0); if(!i_curve[0]) error = 1; for(i = 1; i < 3; ++i) { i_curve[i] = i_curve[0]; } } if(!error) { #ifndef OY_HYP #define OY_SQRT(a,b) ((a)*(a) + (b)*(b)) #define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0) #endif /* reduce brightness remaining inside a cone with a roof angle of 30° */ double max_brightness; double src_Lab[3], dst_Lab[3]; lcm2iccXYZ2iccLab( src_iccXYZ, src_Lab ); lcm2iccXYZ2iccLab( illu_iccXYZ, dst_Lab ); icc_ab[0] = dst_Lab[1] - src_Lab[1]; icc_ab[1] = dst_Lab[2] - src_Lab[2]; max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5); if(!(flags & 0x01)) /* skip computation */ { /* avoid color clipping around the white point */ curve_params_low[1] = max_brightness; o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low); o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params); if(!o_curve[0] || !o_curve[1]) error = 1; } } if(error) goto lcm2CreateAbstractWhitePointProfileBClean; if(icc_ab[1] > 0) { sprintf( kelvin_name, "Bradford Reddish CIE*a %g CIE*b %g v1 lcm2", icc_ab[0], icc_ab[1] ); } else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) { sprintf( kelvin_name, "Bradford CIE*a %g CIE*b %g v1 lcm2", icc_ab[0], icc_ab[1] ); kelvin_meta[1] = "neutral,type,white_point,atom"; kelvin_meta[3] = "yes,D50,kelvin"; } else { sprintf( kelvin_name, "Bradford Bluish CIE*a %g CIE*b %g v1 lcm2", icc_ab[0], icc_ab[1] ); kelvin_meta[1] = "bluish,type,white_point,atom"; kelvin_meta[3] = "yes,bluish,kelvin"; } *my_abstract_file_name = kelvin_name; if(flags & 0x01) /* skip computation */ { return error; } profile = lcm2CreateProfileFragment ( "*lab", // CIE*Lab "*lab", // CIE*Lab icc_profile_version, kelvin_name, "Oyranos project 2018", "Kai-Uwe Behrmann", ICC_2011_LICENSE, "Bradford", "http://www.cie.co.at", NULL); if(!profile) goto lcm2CreateAbstractWhitePointProfileBClean; error = lcm2CreateProfileLutByFuncAndCurves( profile, 
                                       lcm2SamplerWhitePointBradford, icc_XYZ,
                                       o_curve, i_curve,
                                       "*lab", "*lab", "*lab",
                                       grid_size, cmsSigAToB0Tag );
  if(!error)
    lcm2AddMetaTexts( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta,
                      cmsSigMetaTag );

lcm2CreateAbstractWhitePointProfileBClean:
  if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
  if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
  if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----" );
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }

  return error;
}

/** Function  lcm2CreateProfileFragment
 *  @brief    Create a color profile starter
 *
 *  In case both the in_space_profile and out_space_profile arguments are set
 *  to "*lab", the profile will be set to class abstract. In case the
 *  in_space_profile is not "*lab" and the latter one is, a color profile of
 *  class input will be generated. With in_space_profile being "*lab" and
 *  out_space_profile not, a color profile of class output will be generated.
 *  If neither argument names a PCS, a device link class profile results.
 *  Note that such profiles initially have no backward LUT and cannot be used
 *  for inverse color transforms, which might be a problem for general
 *  purpose ICC profiles. But you can add more tables if needed by passing in
 *  a previously created profile.
 *
 *  All profiles generated by this function are meant to be filled with
 *  colorimetric data by e.g. lcm2CreateProfileLutByFunc() or
 *  lcm2CreateICCMatrixProfile2().
 *
 *  Here is a code example:
 *  @code
    cmsHPROFILE profile = lcm2CreateProfileFragment (
                                       "*srgb", // sRGB
                                       "*lab",  // CIE*Lab
                                       2.3, "MySpace (MyProject)",
                                       "My Project 2016", "My Name",
                                       ICC_2011_LICENSE,
                                       "My Box", "www.mydomain.net", NULL );
    @endcode
 *
 *  @param[in]     in_space_profile    input color space; for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     out_space_profile   output color space; for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     icc_profile_version 2.3 or 4.3
 *  @param[in]     my_abstract_description  internal profile name
 *  @param[in]     provider            e.g. "My Project 2016"
 *  @param[in]     vendor              e.g. "My Name"
 *  @param[in]     my_license          e.g. "This profile is made available by
 *                 %s, with permission of %s, and may be copied, distributed,
 *                 embedded, made, used, and sold without restriction. Altered
 *                 versions of this profile shall have the original
 *                 identification and copyright information removed and shall
 *                 not be misrepresented as the original profile.";
 *                 the first %s is filled by the provider string arg and the
 *                 second %s by the vendor string arg
 *  @param[in]     device_model        e.g. "My Set"
 *  @param[in]     device_manufacturer e.g.
"www.mydomain.net"; hint: * lcms <= 2.08 writes a malformed desc tag * @param[in,out] h_profile use existing profile; optional * * @version Oyranos: 0.9.6 * @date 2016/03/06 * @since 2009/11/04 (Oyranos: 0.1.10) */ cmsHPROFILE lcm2CreateProfileFragment( const char * in_space_profile, const char * out_space_profile, double icc_profile_version, const char * my_abstract_description, const char * provider, const char * vendor, const char * my_license, const char * device_model, const char * device_manufacturer, cmsHPROFILE h_profile ) { cmsHPROFILE h_in_space = 0, h_out_space = 0; cmsColorSpaceSignature csp_in, csp_out; cmsProfileClassSignature profile_class = cmsSigAbstractClass; cmsMLU * mlu[4] = {0,0,0,0}; int i; char * license = NULL; if(!h_profile) { h_profile = cmsCreateProfilePlaceholder( 0 ); } if(!h_profile) goto lcm2CreateProfileFragmentClean; if(in_space_profile) h_in_space = lcm2OpenProfileFile( in_space_profile, NULL ); if(out_space_profile)h_out_space = lcm2OpenProfileFile( out_space_profile, NULL ); csp_in = cmsGetColorSpace( h_in_space ); csp_out = cmsGetColorSpace( h_out_space ); cmsSetProfileVersion( h_profile, icc_profile_version ); #define CSP_IS_PCS(csp) (csp == cmsSigLabData || csp == cmsSigXYZData) if( CSP_IS_PCS(csp_in) && CSP_IS_PCS(csp_out) ) profile_class = cmsSigAbstractClass; else if( CSP_IS_PCS(csp_out) ) profile_class = cmsSigInputClass; else if( CSP_IS_PCS(csp_in) ) profile_class = cmsSigOutputClass; else profile_class = cmsSigLinkClass; cmsSetDeviceClass( h_profile, profile_class ); cmsSetColorSpace( h_profile, csp_in ); cmsSetPCS( h_profile, csp_out ); for(i = 0; i < 4; ++i) mlu[i] = cmsMLUalloc(0,1); if(!(mlu[0] && mlu[1] && mlu[2] && mlu[3])) return h_profile; cmsMLUsetASCII(mlu[0], "EN", "us", my_abstract_description); cmsWriteTag( h_profile, cmsSigProfileDescriptionTag, mlu[0] ); if(device_model) { cmsMLUsetASCII(mlu[1], "EN", "us", device_model); cmsWriteTag( h_profile, cmsSigDeviceModelDescTag, mlu[1]); } if(device_manufacturer) { cmsMLUsetASCII(mlu[2], "EN", "us", device_manufacturer); cmsWriteTag( h_profile, cmsSigDeviceMfgDescTag, mlu[2]); } license = (char *) malloc( strlen(my_license) + strlen(provider) + strlen(vendor) + 1 ); if(!license) goto lcm2CreateProfileFragmentClean;; sprintf( license, my_license, provider, vendor ); cmsMLUsetASCII(mlu[3], "EN", "us", license); cmsWriteTag( h_profile, cmsSigCopyrightTag, mlu[3]); cmsWriteTag( h_profile, cmsSigMediaWhitePointTag, cmsD50_XYZ() ); lcm2CreateProfileFragmentClean: if(h_in_space) { cmsCloseProfile( h_in_space ); } h_in_space = 0; if(h_out_space) { cmsCloseProfile( h_out_space ); } h_out_space = 0; for(i = 0; i < 4; ++i) cmsMLUfree( mlu[i] ); lcm2Free_m(license); return h_profile; } int isBigEndian () { union { unsigned short u16; unsigned char c; } test = { .u16 = 1 }; return !test.c; } /* UTF-8 to WCHAR_T conversion */ typedef uint32_t UTF32; /* at least 32 bits */ typedef uint16_t UTF16; /* at least 16 bits */ typedef uint8_t UTF8; /* typically 8 bits */ typedef unsigned char Boolean; /* 0 or 1 */ /* Some fundamental constants */ #define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD #define UNI_MAX_BMP (UTF32)0x0000FFFF #define UNI_MAX_UTF16 (UTF32)0x0010FFFF #define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF #define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF typedef enum { conversionOK, /* conversion successful */ sourceExhausted, /* partial character in source, but hit end */ targetExhausted, /* insuff. 
room in target for conversion */ sourceIllegal /* source sequence is illegal/malformed */ } lcm2UtfConversionResult; typedef enum { strictConversion = 0, lenientConversion } lcm2UtfConversionFlags; static const int halfShift = 10; /* used for shifting by 10 bits */ static const UTF32 halfBase = 0x0010000UL; static const UTF32 halfMask = 0x3FFUL; #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_HIGH_END (UTF32)0xDBFF #define UNI_SUR_LOW_START (UTF32)0xDC00 #define UNI_SUR_LOW_END (UTF32)0xDFFF #define false 0 #define true 1 /* * Index into the table below with the first byte of a UTF-8 sequence to * get the number of trailing bytes that are supposed to follow it. * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is * left as-is for anyone who may want to do such conversion, which was * allowed in earlier algorithms. */ static const char trailingBytesForUTF8[256] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 }; /* * Magic values subtracted from a buffer value during UTF8 conversion. * This table contains as many values as there might be trailing bytes * in a UTF-8 sequence. */ static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; /* * Utility routine to tell whether a sequence of bytes is legal UTF-8. * This must be called with the length pre-determined by the first byte. * If not calling this from ConvertUTF8to*, then the length can be set by: * length = trailingBytesForUTF8[*source]+1; * and the sequence is illegal right away if there aren't that many bytes * available. * If presented with a length > 4, this returns false. The Unicode * definition of UTF-8 goes up to 4-byte sequences. */ static Boolean isLegalUTF8(const UTF8 *source, int length) { UTF8 a; const UTF8 *srcptr = source+length; switch (length) { default: return false; /* Everything else falls through when "true"... */ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH case 2: if ((a = (*--srcptr)) > 0xBF) return false; switch (*source) { /* no fall-through in this inner switch */ case 0xE0: if (a < 0xA0) return false; break; case 0xED: if (a > 0x9F) return false; break; case 0xF0: if (a < 0x90) return false; break; case 0xF4: if (a > 0x8F) return false; break; default: if (a < 0x80) return false; OY_FALLTHROUGH } OY_FALLTHROUGH case 1: if (*source >= 0x80 && *source < 0xC2) return false; } if (*source > 0xF4) return false; return true; } lcm2UtfConversionResult lcm2ConvertUTF8toUTF16 (const UTF8** sourceStart, const UTF8* sourceEnd, UTF16** targetStart, UTF16* targetEnd, lcm2UtfConversionFlags flags) { lcm2UtfConversionResult result = conversionOK; const UTF8* source = *sourceStart; UTF16* target = *targetStart; while (source < sourceEnd) { UTF32 ch = 0; unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; if (source + extraBytesToRead >= sourceEnd) { result = sourceExhausted; break; } /* Do this check whether lenient or strict */ if (! 
        isLegalUTF8(source, extraBytesToRead+1))
    {
      result = sourceIllegal;
      break;
    }
    /*
     * The cases all fall through. See "Note A" below.
     */
    switch (extraBytesToRead) {
      case 5: ch += *source++; ch <<= 6; OY_FALLTHROUGH /* remember, illegal UTF-8 */
      case 4: ch += *source++; ch <<= 6; OY_FALLTHROUGH /* remember, illegal UTF-8 */
      case 3: ch += *source++; ch <<= 6; OY_FALLTHROUGH
      case 2: ch += *source++; ch <<= 6; OY_FALLTHROUGH
      case 1: ch += *source++; ch <<= 6; OY_FALLTHROUGH
      case 0: ch += *source++;
    }
    ch -= offsetsFromUTF8[extraBytesToRead];

    if (target >= targetEnd) {
      source -= (extraBytesToRead+1); /* Back up source pointer! */
      result = targetExhausted; break;
    }
    if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
      /* UTF-16 surrogate values are illegal in UTF-32 */
      if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
        if (flags == strictConversion) {
          source -= (extraBytesToRead+1); /* return to the illegal value itself */
          result = sourceIllegal;
          break;
        } else {
          *target++ = UNI_REPLACEMENT_CHAR;
        }
      } else {
        *target++ = (UTF16)ch; /* normal case */
      }
    } else if (ch > UNI_MAX_UTF16) {
      if (flags == strictConversion) {
        result = sourceIllegal;
        source -= (extraBytesToRead+1); /* return to the start */
        break; /* Bail out; shouldn't continue */
      } else {
        *target++ = UNI_REPLACEMENT_CHAR;
      }
    } else {
      /* target is a character in range 0xFFFF - 0x10FFFF. */
      if (target + 1 >= targetEnd) {
        source -= (extraBytesToRead+1); /* Back up source pointer! */
        result = targetExhausted; break;
      }
      ch -= halfBase;
      *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
      *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
    }
  }
  *sourceStart = source;
  *targetStart = target;
  return result;
}

wchar_t * lcm2Utf8ToWchar           ( const char        * text )
{
  wchar_t * wchar_out, * tmp_out;
  char * in, * tmp_in;
  size_t in_len = strlen(text),
         out_len = in_len*sizeof(wchar_t)+sizeof(wchar_t);
  lcm2UtfConversionResult error;

  if(!in_len) return 0;
  else ++in_len;

  tmp_out = wchar_out = calloc( in_len+1, sizeof(wchar_t) );
  in = tmp_in = strdup( text );

  error = lcm2ConvertUTF8toUTF16( (const UTF8**)&in, (const UTF8*)in+in_len,
                                  (UTF16**)&tmp_out, (UTF16*)(tmp_out+out_len),
                                  lenientConversion );
  if(error == conversionOK)
  {
    /* store UTF16BE in wchar_t for lcms2 */
    uint16_t * icc_utf16 = (uint16_t*) wchar_out;
    int i;
    for(i = in_len; i >= 0; --i)
      wchar_out[i] = icc_utf16[i];
  }
  else
  {
    lcm2msg_p( 300, NULL, "error[%d] %lu %lu %s",
               error, in_len, out_len, text );
    lcm2Free_m(wchar_out);
  }

  lcm2Free_m( tmp_in );

  return wchar_out;
}

/** Function  lcm2AddMluDescription
 *  @brief    Add translated texts to a profile
 *
 *  Iterates over the provided string list, converts from "UTF-8" input
 *  to "WCHAR_T" for lcms and
 *  does byteswapping on little endian machines.
 *
 *  Here is a code example:
 *  @code
    const char * texts[] = { "de", "DE", "Mein Text",
                             "en", "US", "My Text" };
    lcm2AddMluDescription( profile, texts, cmsSigProfileDescriptionMLTag );
    @endcode
 *
 *  @param[in,out] profile             color profile
 *  @param[in]     texts               language + country + text list
 *  @param[in]     tag_sig             signature
 *
 * @version Oyranos: 0.9.6
 * @date    2016/03/13
 * @since   2016/03/13 (Oyranos: 0.9.6)
 */
void         lcm2AddMluDescription  ( cmsHPROFILE         profile,
                                      const char        * texts[],
                                      cmsTagSignature     tag_sig )
{
  int n = 0, i;
  cmsMLU * mlu = NULL;

  if(texts) while( texts[n] ) ++n;
  if(!n) return;

  mlu = cmsMLUalloc( 0, n/3 + 1 );
  if(!mlu) return;

  for( i = 0; i < n; i += 3 )
  {
    char lang[4] = {0,0,0,0}, country[4] = {0,0,0,0};
    const char * text = texts[i+2];
    wchar_t * wchar_out;

    wchar_out = lcm2Utf8ToWchar( text );
    if(!wchar_out) continue;

    /* the language code is stored as readable 4 byte string */
    lang[0] = texts[i+0][0]; lang[1] = texts[i+0][1];
    country[0] = texts[i+1][0]; country[1] = texts[i+1][1];

    cmsMLUsetWide( mlu, lang, country, wchar_out );
    lcm2Free_m( wchar_out );
  }

  cmsWriteTag( profile, tag_sig, mlu );
  cmsMLUfree( mlu );
}

/** Function  lcm2AddMetaTexts
 *  @brief    Add meta data to a profile
 *
 *  Iterates over the provided string list, converts from "UTF-8" input
 *  to "WCHAR_T" for lcms and
 *  does byteswapping on little endian machines.
 *
 *  Here is a code example:
 *  @code
    const char * texts[] = { "GROUP_key1", "value1",
                             "DOMAIN_key2", "value2" };
    lcm2AddMetaTexts( profile, "GROUP_,DOMAIN_", texts, cmsSigMetaTag );
    @endcode
 *
 *  A prefix allows for grouping of keys like "EDID_" or "EXIF_".
 *  The prefix part might be cut off in some cases to access another level
 *  of keys. Think of "EDID_model" for monitors and "EXIF_model" for cameras,
 *  which both represent the key "model" concept.
 *
 *  @param[in,out] profile             color profile
 *  @param[in]     prefixes            the used uppercase prefix list
 *  @param[in]     key_value           key + value list
 *  @param[in]     tag_sig             signature
 *
 * @version Oyranos: 0.9.7
 * @date    2017/02/11
 * @since   2017/02/11 (Oyranos: 0.9.7)
 */
void         lcm2AddMetaTexts       ( cmsHPROFILE         profile,
                                      const char        * prefixes,
                                      const char        * key_value[],
                                      cmsTagSignature     tag_sig )
{
  int n = 0, i;
  cmsHANDLE dict = NULL;
  cmsContext contextID = cmsCreateContext( NULL,NULL );
  wchar_t * wchar_key = NULL, * wchar_val = NULL;

  if(key_value) while( key_value[n] ) ++n;
  if(n)
    dict = cmsDictAlloc( contextID );
  else
    lcm2msg_p( 300, NULL, "nothing to write %s", __func__ );
  if(!dict) return;

  if(prefixes)
  {
    wchar_key = lcm2Utf8ToWchar( "prefix" );
    wchar_val = lcm2Utf8ToWchar( prefixes );
  }
  if(wchar_key && wchar_val)
    cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
  lcm2Free_m( wchar_key );
  lcm2Free_m( wchar_val );

  for( i = 0; i < n; i += 2 )
  {
    const char * key = key_value[i+0],
               * val = key_value[i+1];
    wchar_key = lcm2Utf8ToWchar(key), wchar_val = lcm2Utf8ToWchar(val);
    if(!wchar_key || !wchar_val)
    {
      lcm2Free_m( wchar_key );
      lcm2Free_m( wchar_val );
      continue;
    }
    cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
    lcm2Free_m( wchar_key );
    lcm2Free_m( wchar_val );
  }

  cmsWriteTag( profile, tag_sig, dict );
  cmsDictFree( dict );
}

/** Function  lcm2CreateICCMatrixProfile2
 *  @brief    Create a profile from primaries, white point and one gamma value
 *
 *  Used for ICC from EDID, Camera RAW etc. Marti calls these matrix/shaper.
 *
 *  @code
    // create linear space with REC.709/sRGB primaries and D65 white point
    cmsHPROFILE h_my_space = lcm2CreateICCMatrixProfile2( 1.0,
                                       0.64,0.33, 0.30,0.60, 0.15,0.06,
                                       0.3127,0.329 );
    @endcode
 *
 * @version Oyranos: 0.9.6
 * @date    2016/03/04
 * @since   2009/10/24 (Oyranos: 0.1.10)
 */
cmsHPROFILE  lcm2CreateICCMatrixProfile2 (
                                      float             gamma,
                                      float rx, float ry,
                                      float gx, float gy,
                                      float bx, float by,
                                      float wx, float wy )
{
  cmsCIExyYTRIPLE p;
  cmsToneCurve * g[3] = {0,0,0};
  /* 0.31271, 0.32902 D65 */
  cmsCIExyY wtpt_xyY;
  cmsHPROFILE lp = 0;

  p.Red.x = rx; p.Red.y = ry; p.Red.Y = 1.0;
  p.Green.x = gx; p.Green.y = gy; p.Green.Y = 1.0;
  p.Blue.x = bx; p.Blue.y = by; p.Blue.Y = 1.0;
  wtpt_xyY.x = wx; wtpt_xyY.y = wy; wtpt_xyY.Y = 1.0;

  g[0] = g[1] = g[2] = cmsBuildGamma(0, (double)gamma);
  if(!g[0]) return NULL;
  lp = cmsCreateRGBProfile( &wtpt_xyY, &p, g );
  cmsFreeToneCurve( g[0] );

  return lp;
}

/** Function  lcm2MessageFunc
 *  @brief    default message function to console
 *
 *  The default message function is used as a message printer to the console
 *  from library start.
 *
 *  @param         code                a message code understood by your
 *                                     message handler or openiccMSG_e
 *  @param         context_object      an openicc object is expected
 *  @param         format              the text format string for following args
 *  @param         ...                 the variable args fitting to format
 *  @return                            0 - success; 1 - error
 *
 * @version OpenICC: 0.1.0
 * @date    2009/07/20
 * @since   2008/04/03 (OpenICC: 0.1.0)
 */
int  lcm2MessageFunc                ( int/*openiccMSG_e*/ code OY_UNUSED,
                                      const void        * context_object OY_UNUSED,
                                      const char        * format,
                                      ... )
{
  char * text = 0;
  int error = 0;
  va_list list;
  size_t sz = 0;
  int len = 0;

  va_start( list, format);
  len = vsnprintf( text, sz, format, list);
  va_end  ( list );

  {
    text = calloc( len+2, sizeof(char) );
    if(!text)
    {
      fprintf(stderr, "Could not allocate memory.\n");
      return 1;
    }
    va_start( list, format);
    len = vsnprintf( text, len+1, format, list);
    va_end  ( list );
  }

  if(text)
    fprintf( stderr, "%s\n", text );

  lcm2Free_m( text );

  return error;
}

lcm2Message_f lcm2msg_p = lcm2MessageFunc;

/** @brief   set a custom message function
 *
 *  Use it to connect to a user message system.
 */
int  lcm2MessageFuncSet             ( lcm2Message_f       message_func )
{
  if(message_func)
    lcm2msg_p = message_func;
  else
    lcm2msg_p = lcm2MessageFunc;
  return 1;
}

/** @brief   run time API version */
int  lcm2Version                    ( )
{ return LCM2PROFILER_API; }

/** @} */ /* profiler */

/** \addtogroup profiler
 *
 *  The Oyranos ICC Profiler API provides a platform-independent C interface
 *  to generate ICC profiles. Its main purpose is to generate ICC profiles in
 *  a programmatic way.
 *  The only dependency is littleCMS 2
 *  <a href="http://www.littlecms.com">www.littlecms.com</a>.
 *  It reduces the need for much of the lcms2
 *  boilerplate for format-independent sampling, multi-localised strings from
 *  UTF-8 and more. The sampler collection contains effects and color space
 *  converters.
 *  The code consists of one source file and a header, so it can easily
 *  be placed inside your project.
 *
 *
 *  @section api API Documentation
 *  The Oyranos ICC Profiler API is contained in the lcm2_profiler.h header
 *  file.
 *
 *  The high level API takes a few arguments and generates a profile in
 *  one go.
 *  Effect profiles can be created in one call
 *  by lcm2CreateAbstractProfile(). It needs a @ref samplers function, which
 *  fills the Look Up Table (LUT). Three APIs exist to generate white point
 *  effects: lcm2CreateAbstractTemperatureProfile(),
 *  lcm2CreateAbstractWhitePointProfileLab() and
 *  lcm2CreateAbstractWhitePointProfileBradford().
 *  These high level APIs allow writing
 *  the profile to disk in one go.
 *
 *  The lower level APIs can be used to customise the profile generation.
 *  Basic matrix/shaper profiles can be created with
 *  lcm2CreateICCMatrixProfile2() and filled with custom texts in
 *  lcm2CreateProfileFragment().
 *
 *  The following low level code sample comes from @ref lcm2_profiler.c.
 *  The code sets up a basic profile description and color spaces:
 *  @dontinclude lcm2_profiler.c
 *  @code
 *  // prepare some variables
 *  double icc_profile_version = 2.3;
 *  double icc_ab[2] = {0.0, 0.0};
 *  cmsHPROFILE profile;
 *  const char * kelvin_name = "5000 K";
 *  int error;
 *  int i;
 *  int grid_size = 17;
 *  cmsToneCurve * i_curve[3] = {NULL,NULL,NULL},
 *               * o_curve[3] = {NULL,NULL,NULL};
 *  i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0);
 *  for(i = 1; i < 3; ++i)
 *  { i_curve[i] = o_curve[i] = i_curve[0]; }
 *  @endcode
 *  @skip fragment
 *  @until cmsSigAToB0Tag
 *
 *  Profile I/O happens with lcm2OpenProfileFile(), which takes file names and
 *  a few wildcards as arguments. lcm2WriteProfileToFile() helps with writing
 *  canonical profile names. lcm2WriteProfileToMem() writes a profile to a
 *  custom memory allocator.
 *
 *  Most of the functions come with examples.
 */
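/* A minimal end-to-end sketch (illustrative only, not part of the library
 * source): create a 5000 K abstract effect profile and let the function
 * write it to disk by omitting the h_profile argument.
 *
 * @code
    char * file_name = NULL;
    int error = lcm2CreateAbstractTemperatureProfile( 5000.0, NULL, 17, 2.3,
                                                      &file_name, NULL );
    if(!error && file_name) free( file_name );
    @endcode
 */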
PermutationMMD.h
/* * Copyright (c) The Shogun Machine Learning Toolbox * Written (w) 2012 - 2013 Heiko Strathmann * Written (w) 2014 - 2017 Soumyajit De * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those * of the authors and should not be interpreted as representing official policies, * either expressed or implied, of the Shogun Development Team. */ #ifndef PERMUTATION_MMD_H_ #define PERMUTATION_MMD_H_ #include <algorithm> #include <numeric> #include <shogun/lib/SGVector.h> #include <shogun/lib/SGMatrix.h> #include <shogun/mathematics/Math.h> #include <shogun/statistical_testing/internals/mmd/ComputeMMD.h> namespace shogun { namespace internal { namespace mmd { #ifndef DOXYGEN_SHOULD_SKIP_THIS struct PermutationMMD : ComputeMMD { PermutationMMD() : m_save_inds(false) { } template <class Kernel> SGVector<float32_t> operator()(const Kernel& kernel) { ASSERT(m_n_x>0 && m_n_y>0); ASSERT(m_num_null_samples>0); precompute_permutation_inds(); const index_t size=m_n_x+m_n_y; SGVector<float32_t> null_samples(m_num_null_samples); #pragma omp parallel for for (auto n=0; n<m_num_null_samples; ++n) { terms_t terms; for (auto j=0; j<size; ++j) { auto inverted_col=m_inverted_permuted_inds(j, n); for (auto i=j; i<size; ++i) { auto inverted_row=m_inverted_permuted_inds(i, n); if (inverted_row>=inverted_col) add_term_lower(terms, kernel(i, j), inverted_row, inverted_col); else add_term_lower(terms, kernel(i, j), inverted_col, inverted_row); } } null_samples[n]=compute(terms); SG_SDEBUG("null_samples[%d] = %f!\n", n, null_samples[n]); } return null_samples; } SGMatrix<float32_t> operator()(const KernelManager& kernel_mgr) { ASSERT(m_n_x>0 && m_n_y>0); ASSERT(m_num_null_samples>0); precompute_permutation_inds(); const index_t size=m_n_x+m_n_y; SGMatrix<float32_t> null_samples(m_num_null_samples, kernel_mgr.num_kernels()); SGVector<float32_t> km(size*(size+1)/2); for (auto k=0; k<kernel_mgr.num_kernels(); ++k) { auto kernel=kernel_mgr.kernel_at(k); terms_t terms; for (auto i=0; i<size; ++i) { for (auto j=i; j<size; ++j) { auto index=i*size-i*(i+1)/2+j; km[index]=kernel->kernel(i, j); } } #pragma omp parallel for for (auto n=0; n<m_num_null_samples; ++n) { terms_t null_terms; for (auto i=0; i<size; ++i) { auto 
inverted_row=m_inverted_permuted_inds(i, n); auto index_base=i*size-i*(i+1)/2; for (auto j=i; j<size; ++j) { auto index=index_base+j; auto inverted_col=m_inverted_permuted_inds(j, n); if (inverted_row<=inverted_col) add_term_upper(null_terms, km[index], inverted_row, inverted_col); else add_term_upper(null_terms, km[index], inverted_col, inverted_row); } } null_samples(n, k)=compute(null_terms); } } return null_samples; } template <class Kernel> float64_t p_value(const Kernel& kernel) { auto statistic=ComputeMMD::operator()(kernel); auto null_samples=operator()(kernel); return compute_p_value(null_samples, statistic); } SGVector<float64_t> p_value(const KernelManager& kernel_mgr) { ASSERT(m_n_x>0 && m_n_y>0); ASSERT(m_num_null_samples>0); precompute_permutation_inds(); const index_t size=m_n_x+m_n_y; SGVector<float32_t> null_samples(m_num_null_samples); SGVector<float64_t> result(kernel_mgr.num_kernels()); SGVector<float32_t> km(size*(size+1)/2); for (auto k=0; k<kernel_mgr.num_kernels(); ++k) { auto kernel=kernel_mgr.kernel_at(k); terms_t terms; for (auto i=0; i<size; ++i) { for (auto j=i; j<size; ++j) { auto index=i*size-i*(i+1)/2+j; km[index]=kernel->kernel(i, j); add_term_upper(terms, km[index], i, j); } } float32_t statistic=compute(terms); SG_SDEBUG("Kernel(%d): statistic=%f\n", k, statistic); #pragma omp parallel for for (auto n=0; n<m_num_null_samples; ++n) { terms_t null_terms; for (auto i=0; i<size; ++i) { auto inverted_row=m_inverted_permuted_inds(i, n); auto index_base=i*size-i*(i+1)/2; for (auto j=i; j<size; ++j) { auto index=index_base+j; auto inverted_col=m_inverted_permuted_inds(j, n); if (inverted_row<=inverted_col) add_term_upper(null_terms, km[index], inverted_row, inverted_col); else add_term_upper(null_terms, km[index], inverted_col, inverted_row); } } null_samples[n]=compute(null_terms); } result[k]=compute_p_value(null_samples, statistic); SG_SDEBUG("Kernel(%d): p_value=%f\n", k, result[k]); } return result; } inline void precompute_permutation_inds() { ASSERT(m_num_null_samples>0); allocate_permutation_inds(); for (auto n=0; n<m_num_null_samples; ++n) { std::iota(m_permuted_inds.data(), m_permuted_inds.data()+m_permuted_inds.size(), 0); CMath::permute(m_permuted_inds); if (m_save_inds) { auto offset=n*m_permuted_inds.size(); std::copy(m_permuted_inds.data(), m_permuted_inds.data()+m_permuted_inds.size(), &m_all_inds.matrix[offset]); } for (index_t i=0; i<m_permuted_inds.size(); ++i) m_inverted_permuted_inds(m_permuted_inds[i], n)=i; } } inline float64_t compute_p_value(SGVector<float32_t>& null_samples, float32_t statistic) const { std::sort(null_samples.data(), null_samples.data()+null_samples.size()); float64_t idx=null_samples.find_position_to_insert(statistic); return 1.0-idx/null_samples.size(); } inline void allocate_permutation_inds() { const index_t size=m_n_x+m_n_y; if (m_permuted_inds.size()!=size) m_permuted_inds=SGVector<index_t>(size); if (m_inverted_permuted_inds.num_cols!=m_num_null_samples || m_inverted_permuted_inds.num_rows!=size) m_inverted_permuted_inds=SGMatrix<index_t>(size, m_num_null_samples); if (m_save_inds && (m_all_inds.num_cols!=m_num_null_samples || m_all_inds.num_rows!=size)) m_all_inds=SGMatrix<index_t>(size, m_num_null_samples); } index_t m_num_null_samples; bool m_save_inds; SGVector<index_t> m_permuted_inds; SGMatrix<index_t> m_inverted_permuted_inds; SGMatrix<index_t> m_all_inds; }; #endif // DOXYGEN_SHOULD_SKIP_THIS } } } #endif // PERMUTATION_MMD_H_
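// A minimal usage sketch (illustrative assumption, not part of this header):
// the sample sizes are members inherited from ComputeMMD, and any functor
// exposing float32_t operator()(index_t, index_t) can serve as Kernel.
//
//   shogun::internal::mmd::PermutationMMD permutation_mmd;
//   permutation_mmd.m_n_x = 100;              // samples from distribution p
//   permutation_mmd.m_n_y = 100;              // samples from distribution q
//   permutation_mmd.m_num_null_samples = 250; // permutations to draw
//   SGVector<float32_t> null_samples = permutation_mmd(kernel_functor);
//   float64_t p = permutation_mmd.p_value(kernel_functor);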
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. 
IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
  /// This will be true except when we are parsing an expression within a C++
  /// template argument list, where the '>' closes the template
  /// argument list.
  bool GreaterThanIsOperator;

  /// ColonIsSacred - When this is false, we aggressively try to recover from
  /// code like "foo : bar" as if it were a typo for "foo :: bar".  This is not
  /// safe in case statements and a few other things.  This is managed by the
  /// ColonProtectionRAIIObject RAII object.
  bool ColonIsSacred;

  /// When true, we are directly inside an Objective-C message
  /// send expression.
  ///
  /// This is managed by the \c InMessageExpressionRAIIObject class, and
  /// should not be set directly.
  bool InMessageExpression;

  /// Gets set to true after calling ProduceSignatureHelp; it is a workaround
  /// to make sure ProduceSignatureHelp is only called at the deepest function
  /// call.
  bool CalledSignatureHelp = false;

  /// The "depth" of the template parameters currently being parsed.
  unsigned TemplateParameterDepth;

  /// RAII class that manages the template parameter depth.
  class TemplateParameterDepthRAII {
    unsigned &Depth;
    unsigned AddedLevels;
  public:
    explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), AddedLevels(0) {}

    ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

    void operator++() {
      ++Depth;
      ++AddedLevels;
    }
    void addDepth(unsigned D) {
      Depth += D;
      AddedLevels += D;
    }
    void setAddedDepth(unsigned D) {
      Depth = Depth - AddedLevels + D;
      AddedLevels = D;
    }

    unsigned getDepth() const { return Depth; }
    unsigned getOriginalDepth() const { return Depth - AddedLevels; }
  };

  /// Factory object for creating ParsedAttr objects.
  AttributeFactory AttrFactory;

  /// Gathers and cleans up TemplateIdAnnotations when parsing of a
  /// top-level declaration is finished.
  SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

  /// Identifiers which have been declared within a tentative parse.
  SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

  /// Tracker for '<' tokens that might have been intended to be treated as an
  /// angle bracket instead of a less-than comparison.
  ///
  /// This happens when the user intends to form a template-id, but typoes the
  /// template-name or forgets a 'template' keyword for a dependent template
  /// name.
  ///
  /// We track these locations from the point where we see a '<' with a
  /// name-like expression on its left until we see a '>' or '>>' that might
  /// match it.
  struct AngleBracketTracker {
    /// Flags used to rank candidate template names when there is more than one
    /// '<' in a scope.
    enum Priority : unsigned short {
      /// A non-dependent name that is a potential typo for a template name.
      PotentialTypo = 0x0,
      /// A dependent name that might instantiate to a template-name.
      DependentName = 0x2,

      /// A space appears before the '<' token.
      SpaceBeforeLess = 0x0,
      /// No space before the '<' token.
      NoSpaceBeforeLess = 0x1,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
    };

    struct Loc {
      Expr *TemplateName;
      SourceLocation LessLoc;
      AngleBracketTracker::Priority Priority;
      unsigned short ParenCount, BracketCount, BraceCount;

      bool isActive(Parser &P) const {
        return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
               P.BraceCount == BraceCount;
      }

      bool isActiveOrNested(Parser &P) const {
        return isActive(P) || P.ParenCount > ParenCount ||
               P.BracketCount > BracketCount || P.BraceCount > BraceCount;
      }
    };

    SmallVector<Loc, 8> Locs;

    /// Add an expression that might have been intended to be a template name.
    /// In the case of ambiguity, we arbitrarily select the innermost such
    /// expression; for example, in 'foo < bar < baz', 'bar' is the current
    /// candidate. No attempt is made to track that 'foo' is also a candidate
    /// for the case where we see a second suspicious '>' token.
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio,
                        P.ParenCount, P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
    AllowStandaloneOpenMPDirectives = 0x2,
    /// This context is at the top level of a GNU statement expression.
    InStmtExpr = 0x4,

    /// The context of a regular substatement.
    SubStmt = 0,
    /// The context of a compound-statement.
    Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

    LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
  };

  /// Act on an expression statement that might be the last statement in a
  /// GNU statement expression. Checks whether we are actually at the end of
  /// a statement expression and builds a suitable expression statement.
  StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding.
All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. 
  bool isTokenSpecial() const {
    return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
           isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
  }

  /// Returns true if the current token is '=' or is a type of '='.
  /// For typos, give a fixit to '='.
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;     // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind.  This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched a specific position in the grammar, provide code-completion
  /// results based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // TryAnnotateTypeOrScopeToken will try harder to find a type name by // attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token.
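// Usage sketch (illustrative, not taken from this header): a caller that
// needs a type first forces annotation, then unpacks the annotation token.
// By the convention used here, TryAnnotateTypeOrScopeToken() returns true
// on error:
//
//   if (!TryAnnotateTypeOrScopeToken() && Tok.is(tok::annot_typename)) {
//     ParsedType T = getTypeAnnotation(Tok);
//     ConsumeAnnotationToken();
//     // ... use T ...
//   }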
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. 
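// Usage sketch (illustrative, not taken from this header): a pure
// disambiguation probe uses RevertingTentativeParsingAction so the revert
// also happens on early returns ('mightBeDeclaration' is a hypothetical
// helper name):
//
//   bool mightBeDeclaration() {
//     RevertingTentativeParsingAction PA(*this);
//     ConsumeToken();
//     return isDeclarationSpecifier();   // tokens restored by ~PA()
//   }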
enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. 
If Flags has the StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; IdentifierInfo *MacroII = nullptr; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations.
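// Usage sketch (illustrative, not taken from this header): error recovery
// with SkipUntil. StopAtSemi bounds the skip at statement granularity, and
// StopBeforeMatch leaves the matched token for the caller:
//
//   if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch))
//     ConsumeParen();   // stopped at ')' without consuming it
//   // otherwise ';' or EOF was reached first; give up on this construct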
struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// Whether this member function had an associated template /// scope. When true, D is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; explicit LexedMethod(Parser* P, Decl *MD) : Self(P), D(MD), TemplateScope(false) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely-defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), TemplateScope(false), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser* Self; /// Method - The method declaration. Decl *Method; /// Whether this member function had an associated template /// scope. When true, Method is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; /// DefaultArgs - Contains the parameters of the function and /// their default arguments. At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, for method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete.
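// Illustrative note (not taken from this header): the Late* records above
// make member bodies and initializers two-phase. Tokens are stashed while
// the class body is scanned, then replayed once the class is complete:
//
//   // phase 1: cache tokens into a LexedMethod / LateParsedMemberInitializer
//   // phase 2, after the closing '}' of the class:
//   ParseLexedMethodDefs(getCurrentClass());
//   ParseLexedMemberInitializers(getCurrentClass());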
struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// otherwise, it is a tag declaration. bool TemplateScope : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class off the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains any template-specific /// information that has been parsed prior to parsing declaration /// specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation. SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
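// Usage sketch (illustrative, not taken from this header): caching a
// default argument's tokens for late parsing, stopping at ',' or ')'
// without consuming the terminator:
//
//   CachedTokens Toks;
//   ConsumeAndStoreUntil(tok::comma, tok::r_paren, Toks,
//                        /*StopAtSemi=*/true, /*ConsumeFinalToken=*/false);
//   // Toks can now back a LateParsedDefaultArgument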
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-C context-sensitive keyword recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression.
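// Usage sketch (illustrative, not taken from this header): the expression
// entry points above form a precedence-climbing parser; a cast-expression
// is parsed first, then ParseRHSOfBinaryExpression folds in trailing
// operators of at least MinPrec:
//
//   ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false);
//   return ParseRHSOfBinaryExpression(LHS, prec::Assignment);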
bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. 
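// Usage sketch (illustrative, not taken from this header): collecting call
// arguments with ParseExpressionList after '(' has been consumed; by the
// convention used here, a true return signals an error:
//
//   ExprListTy ArgExprs;
//   CommaLocsTy CommaLocs;
//   if (ParseExpressionList(ArgExprs, CommaLocs))
//     SkipUntil(tok::r_paren, StopAtSemi);   // recover to ')'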
enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. 
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior that this __if_exists or __if_not_exists block /// should have.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
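// Usage sketch (illustrative, not taken from this header; the DeclSpec
// constructor taking AttrFactory is an assumption from the surrounding
// API): the DeclSpecContext passed down restricts what the specifier
// parser accepts; a trailing return type, for instance, only admits
// type-specifiers:
//
//   DeclSpec DS(AttrFactory);
//   ParseSpecifierQualifierList(DS, AS_none,
//                               DeclSpecContext::DSC_trailing);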
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. 
Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and it isn't part of an expression such as a function-style /// cast. Return false if it's not a decl-specifier, or we're not sure. bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ /// 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form a type-id or an expression. /// It is similar to isTypeIdInParens but does not assume that the type-id /// is in parentheses. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the tricky cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPResult::True if this token starts an expression, \c TPResult::False if /// this token starts a type-specifier-seq, or \c TPResult::Ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop.
TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear in certain syntactic locations // which the standard permits but we don't support yet, for example, attributes // that appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute.
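// Usage sketch (illustrative, not taken from this header): the TryParse*
// probes above consume tokens, so callers bracket them with a tentative
// action that is always reverted:
//
//   RevertingTentativeParsingAction PA(*this);
//   TPResult TPR = TryParseDeclarator(/*mayBeAbstract=*/true);
//   // tokens are restored when PA is destroyed; only TPR survives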
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl 
**OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. 
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool 
parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
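A note on usage, added for illustration: the TryParse* functions declared above consume tokens, so callers wrap them in a backtracking guard. A minimal sketch, assuming the TentativeParsingAction RAII helper that Parser declares elsewhere in this header; the enclosing member function is hypothetical.

void Parser::disambiguateSketch() { // hypothetical caller, for illustration
  TentativeParsingAction PA(*this);
  TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
  PA.Revert(); // TryParse* consumed tokens; rewind the token stream
  if (TPR == TPResult::True) {
    // Unambiguously a declaration: now parse it for real.
  } else if (TPR == TPResult::Error) {
    // A parsing error was hit during disambiguation; let the declaration
    // parsing code handle it, as the comments above describe.
  }
}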
for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 -verify %s -Wuninitialized

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd foo

void test_no_clause() {
  int i;
#pragma omp for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}}
#pragma omp for simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

void test_safelen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_simdlen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_safelen_simdlen() {
  int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}

void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
#pragma omp for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd collapse(2)
  for (i = 0; i < 16; ++i) // expected-note {{defined as lastprivate}}
  // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_linear() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp for simd linear(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp for simd private(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp for simd linear(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp for simd linear(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp for simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp for simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
}

void test_aligned() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}

void test_nontemporal() {
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}}
#pragma omp for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}}
#pragma omp for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}}
  for (int i = 0; i < 10; ++i)
    ;
}
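For contrast with the diagnostic cases above, a sketch of a well-formed use of the directive these tests exercise; it is illustrative only and not part of the test file (the function name and constants are invented). simdlen(4) does not exceed safelen(8), and aligned() names pointers, so no diagnostics are expected.

void well_formed_reference(float *a, float *b) {
  int i;
#pragma omp parallel
#pragma omp for simd safelen(8) simdlen(4) aligned(a, b : 32)
  for (i = 0; i < 16; ++i)
    a[i] = 2.0f * b[i]; /* clean: every clause argument is valid */
}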
tinyexr.h
/* Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... // #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif // Use embedded miniz or not to decode ZIP format pixel. Linking with zlib // required if this flas is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ comporession when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_THREAD #define TINYEXR_USE_THREAD (0) // No threaded loading. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_OPENMP #ifdef _OPENMP #define TINYEXR_USE_OPENMP (1) #else #define TINYEXR_USE_OPENMP (0) #endif #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-6) #define TINYEXR_ERROR_CANT_OPEN_FILE (-7) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8) #define TINYEXR_ERROR_INVALID_HEADER (-9) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10) #define TINYEXR_ERROR_CANT_WRITE_FILE (-11) #define TINYEXR_ERROR_SERIALZATION_FAILED (-12) #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 int tiled; // tile format image int long_name; // long name attribute int non_image; // deep image(EXR 2.0) int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. 
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it (only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { For backward compatibility. Not recommended for use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
// When the specified layer name is not found in the EXR file, the function
// will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                            const char *filename, const char *layer_name,
                            const char **err);
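// Example (informative sketch, not part of the API): minimal use of the
// one-shot LoadEXR() wrapper declared above. The file name "input.exr" is a
// placeholder; fprintf assumes <stdio.h>.
//
//   const char *err = NULL;
//   float *rgba = NULL;
//   int width, height;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);  // release the error string
//     }
//   } else {
//     // rgba holds width * height RGBA float pixels; free() it when done.
//     free(rgba);
//   }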
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
//             after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string (will be filled when the function returns an
//             error code). Free it using FreeEXRErrorMessage after using this
//             value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
                     int *num_layers, const char **err);

// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by just looking up the
// header).
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width
// x height`
// Save image as fp16(HALF) format when `save_as_fp16` is a positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename, const char **err);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Frees error message
extern void FreeEXRErrorMessage(const char *msg);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);
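// Example (informative sketch, not part of the API): writing a small RGBA
// image with the one-shot SaveEXR() wrapper declared above. The buffer and
// file name are hypothetical; `data` must hold width * height * components
// floats filled by the application.
//
//   float data[16 * 16 * 4];  // 16x16 RGBA, filled elsewhere
//   const char *err = NULL;
//   int ret = SaveEXR(data, 16, 16, /* components */ 4,
//                     /* save_as_fp16 */ 1, "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "EXR save error: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }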
// Loads single-part OpenEXR image from a file.
// Application must set up `ParseEXRHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from memory.
// Application must set up `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// Application must set up `ParseEXRMultipartHeaderFromFile` before calling
// this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from memory.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using the EXRHeader compression_type value.
// Returns the number of bytes on success.
// Returns zero and sets the error string in `err` when there's an
// error.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);
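// Example (informative sketch, not part of the API): the low-level loading
// flow for a single-part scanline file, assuming "input.exr" exists and
// using only the functions declared above.
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   const char *err = NULL;
//   if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS &&
//       !version.multipart) {
//     InitEXRHeader(&header);
//     if (ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) ==
//         TINYEXR_SUCCESS) {
//       InitEXRImage(&image);
//       if (LoadEXRImageFromFile(&image, &header, "input.exr", &err) ==
//           TINYEXR_SUCCESS) {
//         // image.images[c] holds the pixel data for channel c.
//         FreeEXRImage(&image);
//       }
//       FreeEXRHeader(&header);
//     }
//   }
//   if (err) FreeEXRErrorMessage(err);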
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts,
//                                 const char *filename, const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>

// #include <iostream> // debug

#include <limits>
#include <string>
#include <vector>

#if __cplusplus > 199711L
// C++11
#include <cstdint>

#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif

#endif  // __cplusplus > 199711L

#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

#if TINYEXR_USE_MINIZ
namespace miniz {

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"

#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif

#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif

#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
   reading/writing/appending, PNG writing
   See "unlicense" statement at the end of this file.
   Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
   http://www.ietf.org/rfc/rfc1951.txt

   Most API's defined in miniz.c are optional.
For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. 
The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. 
It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. 
Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. #define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. 
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. 
typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. 
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. 
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

// ------------------- ZIP archive reading/writing

#ifndef MINIZ_NO_ARCHIVE_APIS

enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};

typedef struct {
  mz_uint32 m_file_index;
  mz_uint32 m_central_dir_ofs;
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;
  mz_uint64 m_comp_size;
  mz_uint64 m_uncomp_size;
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;

  mz_uint m_file_offset_alignment;

  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;

  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;

  mz_zip_internal_state *m_pState;

} mz_zip_archive;

typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;

// ZIP archive reading

// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif

// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);

// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);

// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);
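// Example (informative sketch): a typical read-locate-extract-close sequence
// with the mz_zip_reader API declared above. Note that this TinyEXR embedding
// defines MINIZ_NO_ARCHIVE_APIS, so the archive functions are compiled out
// here; the sketch assumes a standalone miniz build. "archive.zip" and
// "file.txt" are placeholders, and memset assumes <string.h>.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     int idx = mz_zip_reader_locate_file(&zip, "file.txt", NULL, 0);
//     if (idx >= 0) {
//       size_t size = 0;
//       void *p = mz_zip_reader_extract_to_heap(&zip, (mz_uint)idx, &size, 0);
//       if (p) {
//         // ... use `size` bytes at `p` ...
//         mz_free(p);  // release the heap block allocated by miniz
//       }
//     }
//     mz_zip_reader_end(&zip);
//   }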
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags,
                                 mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment
// fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file
// if mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);

// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);

#endif  // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif  // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer.
If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. 
// *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. 
enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output data will be supplied to this
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may
// be much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
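// A minimal compress/decompress round trip through the heap helpers declared
// above, kept in an #if 0 block so the sketch stays out of the build. `src`
// and `src_len` are hypothetical caller-supplied inputs; the flag choice
// (zlib wrapper + the default 128 probes) is just one reasonable setting.
#if 0
static int tdefl_round_trip_example(const void *src, size_t src_len) {
  size_t comp_len = 0, decomp_len = 0;
  // The low 12 bits of the flags select the max dictionary probes per search.
  void *comp = tdefl_compress_mem_to_heap(
      src, src_len, &comp_len,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (!comp) return 0;
  void *decomp = tinfl_decompress_mem_to_heap(comp, comp_len, &decomp_len,
                                              TINFL_FLAG_PARSE_ZLIB_HEADER);
  // The round trip succeeds if decompression reproduces the original bytes.
  int ok = (decomp != NULL) && (decomp_len == src_len) &&
           (memcmp(decomp, src, src_len) == 0);
  mz_free(decomp);
  mz_free(comp);
  return ok;
}
#endif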
#ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
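  // Note on the streaming path below: tinfl always decompresses into the 32KB
  // circular dictionary in pState->m_dict, and mz_inflate() then copies as
  // much of it as the caller's output buffer can accept. m_dict_ofs and
  // m_dict_avail carry the undelivered tail across calls, which is why each
  // call first drains leftover dictionary bytes before running the
  // decompressor again.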
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }

  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }

  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
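// Note on the table encoding used by the macros below: a non-negative
// m_look_up[] entry packs (code_size << 9) | symbol, so the decoders recover
// the code length with `temp >> 9` and the symbol with `temp & 511`; a
// negative entry is a bit-inverted (~) index into m_tree[], the binary tree
// walked one bit at a time for codes longer than TINFL_FAST_LOOKUP_BITS.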
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
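  // Note: the lengths set above are the fixed Huffman code sizes from RFC 1951
  // section 3.2.6 (literal/length symbols 0-143 -> 8 bits, 144-255 -> 9,
  // 256-279 -> 7, 280-287 -> 8; all 32 distance slots -> 5 bits). The
  // tdefl_optimize_huffman_table() calls below (static_table=MZ_TRUE) only
  // assign canonical codes to these preset lengths; they do not re-derive them.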
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
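// (A dynamic block can lose only when the transmitted code-length tables cost
// more bits than they save, e.g. on very short blocks; the fallback below
// re-emits the block with the fixed static tables, which need no table
// transmission.)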
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
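  // Strategy: hash the three bytes at the current position into a small
  // level-1 table, test the single candidate position it yields, and emit
  // either a match or a literal immediately -- no hash chaining, no lazy
  // parsing.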
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len,
                                                  TDEFL_FINISH) ==
                            TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: http://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
                           0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                           0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,
                           0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,
                           8,    chans[num_chans], 0, 0, 0, 0, 0, 0, 0,
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,
                           0x49, 0x44, 0x41, 0x54};
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}

void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans,
                                              size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}

// ------------------- .ZIP archive reading

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif
defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, 
mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
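  // (When _LARGEFILE64_SOURCE is in effect, the MZ_FILE_STAT and
  // MZ_FILE_STAT_STRUCT macros above resolve to stat64, which avoids the
  // limit.)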
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
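// The two loops below are a textbook in-place heapsort: the first sift-downs
// from the last interior node to build a max-heap ordered by filename, the
// second repeatedly swaps the root to the end of the array and re-heapifies.
// O(n log n) worst case, O(1) extra space.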
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
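  // (The EOCD record is the fixed 22-byte trailer laid out by the
  // MZ_ZIP_ECDH_* offsets above; signature 0x06054b50 is "PK\5\6". The
  // backwards scan was needed because a variable-length archive comment of
  // up to 0xFFFF bytes may follow it.)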
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
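    // (Each central directory header is a fixed 46 bytes followed by
    // variable-length filename, extra, and comment fields; a compressed or
    // decompressed size of 0xFFFFFFFF is the zip64 escape value, hence the
    // rejection below.)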
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
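  // (0x10 is FILE_ATTRIBUTE_DIRECTORY in the DOS/Windows attribute bits that
  // most writers store in the low 16 bits of the external attributes field.)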
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
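/* Two cases follow: stored entries (method 0), or callers that asked for the
   raw compressed bytes, are block-copied straight to the callback, while
   deflated entries go through the tinfl streaming decompressor further down.
   In both paths the CRC-32 is accumulated incrementally over each chunk,
   starting from MZ_CRC32_INIT:

     file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)buf, n);

   so no extra whole-file pass is needed at the end. */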
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
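/* Final validation step: extraction only counts as successful when the
   produced byte count matches the central directory's uncompressed size and
   the running CRC-32 equals the stored value, i.e. (a sketch using the
   locals of this function):

     ok = (out_buf_ofs == file_stat.m_uncomp_size) &&
          (file_crc32 == file_stat.m_crc32);

   A truncated or corrupted stream that still decoded "cleanly" is rejected
   here. */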
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
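/* A value x is a nonzero power of two exactly when (x & (x - 1)) == 0, e.g.
   8 & 7 == 0 (accepted) but 12 & 11 == 8 (rejected). The check below applies
   this to the user-specified m_file_offset_alignment before any entries are
   written. */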
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
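/* The enclosing loop zero-fills the reserved region in 4 KiB chunks so that
   a stub (for example a self-extractor header) can be patched in later. A
   hypothetical caller reserving 64 KiB up front:

     mz_zip_archive zip;
     memset(&zip, 0, sizeof(zip));
     mz_zip_writer_init_file(&zip, "out.zip", 64 * 1024);

   Entries added afterwards then start at offset 64 KiB. */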
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
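/* The append is effectively transactional: the sizes captured in
   orig_central_dir_size above are restored if any push_back fails partway,
   so either the whole record (header + filename + extra + comment + offset)
   lands in the central directory arrays, or none of it does. This keeps the
   writer usable after an allocation failure. */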
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
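/* For existing archives, the file is first opened through the reader path
   (skipping the central-directory sort, which a writer does not need), then
   converted with mz_zip_writer_init_from_reader so new entries are appended
   where the old central directory used to start. A minimal sketch of the
   one-call wrapper this function implements:

     const char *msg = "hello";
     mz_zip_add_mem_to_archive_file_in_place(
         "out.zip", "notes/hello.txt", msg, strlen(msg),
         NULL, 0, (mz_uint)MZ_BEST_COMPRESSION);

   creating out.zip if it does not exist, appending otherwise. */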
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #ifdef _MSC_VER #pragma warning(pop) #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace miniz #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif  // TINYEXR_USE_MINIZ

// static bool IsBigEndian(void) {
//   union {
//     unsigned int i;
//     char c[4];
//   } bint = {0x01020304};
//
//   return bint.c[0] == 1;
// }

static void SetErrorMessage(const std::string &msg, const char **err) {
  if (err) {
#ifdef _WIN32
    (*err) = _strdup(msg.c_str());
#else
    (*err) = strdup(msg.c_str());
#endif
  }
}

static const int kEXRVersionSize = 8;

static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
}

static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[1];
  dst[1] = src[0];
#endif
}

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif

static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif

static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}

#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val,
                 const tinyexr::tinyexr_uint64 *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  dst[4] = src[4];
  dst[5] = src[5];
  dst[6] = src[6];
  dst[7] = src[7];
}
#endif

static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}

// http://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
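/* EXR stores all multi-byte values little-endian on disk. The cpy2/cpy4 and
   swap2/swap4/swap8 helpers above therefore pair an unaligned-safe copy with
   a byte swap that compiles to a no-op on little-endian hosts. A typical
   read of a 32-bit field from a header buffer (a sketch; `ptr` is a
   hypothetical byte pointer):

     unsigned int v;
     memcpy(&v, ptr, sizeof(v));  // no alignment assumed
     swap4(&v);                   // no-op when MINIZ_LITTLE_ENDIAN is set

   This is the pattern ReadAttribute and ReadChannelInfo below rely on. */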
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp =
      0x7c00 << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NUL ('\0') or the end of the buffer.
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data,
                          size_t *marker_size, const char *marker,
                          size_t size) {
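  /* On-disk layout of one EXR header attribute, which this function parses:

       name  : NUL-terminated string
       type  : NUL-terminated string (e.g. "box2i", "chlist", "string")
       size  : little-endian uint32, byte length of the value
       value : `size` raw bytes

     The strnlen guards below reject names/types that run past the remaining
     header bytes, and *marker_size reports how far the cursor advanced. */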
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). 
    memcpy(dst, src, src_size);
    return true;
  }

  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressible run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
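  // (Hedged note: this undoes the byte split performed by CompressRle above;
  // tmpBuf currently holds the even-indexed bytes in its first half and the
  // odd-indexed bytes in its second half, and the loop below re-interleaves
  // them into dst as t1[0], t2[0], t1[1], t2[1], ...)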
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
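//
// A hedged round-trip illustration of the 14-bit basis above (not part of
// the codec): wenc14(300, 100, l, h) stores the average l = (300 + 100) >> 1
// = 200 and the difference h = 300 - 100 = 200; wdec14(200, 200, a, b) then
// reconstructs a = 200 + (200 & 1) + (200 >> 1) = 300 and b = 300 - 200 =
// 100, recovering the input pair exactly.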
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ?
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
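// A hedged illustration of the packed code representation used throughout
// this section (not compiled): each 64-bit table entry stores a canonical
// code together with its bit length as (code << 6) | length, which the
// hufLength()/hufCode() helpers defined just below take apart again.
#if 0
long long packed = (0x5LL << 6) | 3;  // the 3-bit code 0b101
// hufLength(packed) == 3, hufCode(packed) == 0x5
#endif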
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
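  // (A hedged miniature instance of what follows: for
  // frq = {0, 5, 0, 2, 1, 0, 0, ...}, the scan finds im = 1 and iM = 4,
  // fHeap receives pointers to frq[1], frq[3] and frq[4], and the
  // pseudo-symbol added afterwards occupies frq[5].)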
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
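//
// (A hedged picture of the layout built here: with HUF_DECBITS == 14, a
// 3-bit code 0b101 fills all 2^11 primary entries whose top three bits are
// 0b101, so hufDecode() resolves it with a single lookup of the next 14
// input bits; a code longer than 14 bits instead appends its symbol index
// to the secondary list `p` of the entry addressed by its top 14 bits.)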
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
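  // (Hedged example of the decision below: with a 10-bit symbol code and a
  // 6-bit run-length code, the run form costs 10 + 6 + 8 = 24 bits, so it
  // is chosen as soon as repeating the symbol code is more expensive, i.e.
  // from runCount = 3 upward, since 10 * 3 = 30 > 24.)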
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. 
if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); 
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}

static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // Nothing to decode; empty input is treated as an error here.
    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }

    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }

      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                     raw->data())) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
    }
    // catch (...)
    //{
    //  hufFreeDecTable (hdec);
    //  throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e. number of ones in bitmap minus 1

static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  }

  int n = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return n;  // maximum k where lut[k] is non-zero,
}  // i.e.
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) 
{
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);

      foundType = true;
    }
  }

  if (!foundType) {
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Store the attribute into `precision` (not `rate`).
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size = dst_width * dst_num_lines * num_channels;

  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines, int num_channels,
                 const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
          tinyexr::cpy2(&(hf.u), line_ptr + u);

          tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *image =
                reinterpret_cast<unsigned short **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = hf.u;
          } else {  // HALF -> FLOAT
            FP32 f32 = half_to_float(hf);
            float *image = reinterpret_cast<float **>(out_images)[c];
            size_t offset = 0;
            if (line_order == 0) {
              offset = (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              offset = static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            image += offset;
            *image = f32.f;
          }
        }
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
            &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                       channel_offset_list[c] * static_cast<size_t>(width)));
        for (size_t u = 0; u < static_cast<size_t>(width); u++) {
          unsigned int val;
          // val = line_ptr[u];
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(&val);

          unsigned int *image =
              reinterpret_cast<unsigned int **>(out_images)[c];
          if (line_order == 0) {
            image += (static_cast<size_t>(line_no) + v) *
                         static_cast<size_t>(x_stride) +
                     u;
          } else {
            image += static_cast<size_t>(
                         (height - 1 - (line_no + static_cast<int>(v)))) *
                         static_cast<size_t>(x_stride) +
                     u;
          }
          *image = val;
        }
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
            v * pixel_data_size * static_cast<size_t>(x_stride) +
            channel_offset_list[c] * static_cast<size_t>(x_stride)));
        for (size_t u = 0; u < static_cast<size_t>(width); u++) {
          float val;
          // val = line_ptr[u];
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          float *image = reinterpret_cast<float **>(out_images)[c];
          if (line_order == 0) {
            image += (static_cast<size_t>(line_no) + v) *
                         static_cast<size_t>(x_stride) +
                     u;
          } else {
            image += static_cast<size_t>(
                         (height - 1 - (line_no + static_cast<int>(v)))) *
                         static_cast<size_t>(x_stride) +
                     u;
          }
          *image = val;
        }
      }
    } else {
      assert(0);
    }
  }
#else
  assert(0 && "PIZ is not enabled in this build");
  return false;
#endif

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION:
    //  pixel sample data for channel 0 for scanline 0
    //  pixel sample data for channel 1 for scanline 0
    //  pixel sample data for channel ... for scanline 0
    //  pixel sample data for channel n for scanline 0
    //  pixel sample data for channel 0 for scanline 1
    //  pixel sample data for channel 1 for scanline 1
    // pixel sample data for channel ...
    //   pixel sample data for channel n for scanline 1
    //   ...

    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }

    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For RLE_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...

    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
                                 num_attributes)) {
      assert(0);
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = outBuf.size();
    assert(dstLen > 0);
    tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
                           num_lines, num_channels, data_ptr,
                           static_cast<unsigned long>(data_len),
                           zfp_compression_param);

    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...

    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety. #76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              tinyexr::FP32 f32 = half_to_float(hf);

              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }
  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
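  // Worked example (hypothetical numbers): with data_width = 100 and
  // tile_size_x = 32, tiles 0..2 keep the full 32-pixel width and the last
  // tile (tile_offset_x = 3) is clipped to 100 - 3 * 32 = 4 pixels.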
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}

static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      (*pixel_data_size) += sizeof(unsigned short);
      (*channel_offset) += sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      (*pixel_data_size) += sizeof(float);
      (*channel_offset) += sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      (*pixel_data_size) += sizeof(unsigned int);
      (*channel_offset) += sizeof(unsigned int);
    } else {
      // ???
      return false;
    }
  }
  return true;
}

static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);

      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      // pixel_data_size += sizeof(float);
      // channel_offset += sizeof(float);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // pixel_data_size += sizeof(unsigned int);
      // channel_offset += sizeof(unsigned int);
      images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned int *>(
          malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
    }
  }

  return images;
}

static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
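      // (In a multipart file the sequence of headers is terminated by a
      // single empty header, i.e. a lone '\0' byte, which is reported to the
      // caller through `empty_header`.)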
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels            chlist
  // compression         compression
  // dataWindow          box2i
  // displayWindow       box2i
  // lineOrder           lineOrder
  // pixelAspectRatio    float
  // screenWindowCenter  v2f
  // screenWindowWidth   float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;

  info->data_window[0] = 0;
  info->data_window[1] = 0;
  info->data_window[2] = 0;
  info->data_window[3] = 0;
  info->line_order = 0;  // @fixme
  info->display_window[0] = 0;
  info->display_window[1] = 0;
  info->display_window[2] = 0;
  info->display_window[3] = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (version->tiled && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      assert(data.size() == 9);
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;

    } else if (attr_name.compare("compression") == 0) {
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window[0], &data.at(0), sizeof(int));
        memcpy(&info->data_window[1], &data.at(4), sizeof(int));
        memcpy(&info->data_window[2], &data.at(8), sizeof(int));
        memcpy(&info->data_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));

        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window[0], &data.at(0), sizeof(int));
        memcpy(&info->display_window[1], &data.at(4), sizeof(int));
        memcpy(&info->display_window[2], &data.at(8), sizeof(int));
        memcpy(&info->display_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[1]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[2]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[3]));

        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_width));

        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
      }
    } else {
      // Custom attribute (up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        attrib.value = static_cast<unsigned char *>(malloc(data.size()));
        memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
               data.size());
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header."
             << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header."
             << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}

// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window[0] = info.display_window[0];
  exr_header->display_window[1] = info.display_window[1];
  exr_header->display_window[2] = info.display_window[2];
  exr_header->display_window[3] = info.display_window[3];
  exr_header->data_window[0] = info.data_window[0];
  exr_header->data_window[1] = info.data_window[1];
  exr_header->data_window[2] = info.data_window[2];
  exr_header->data_window[3] = info.data_window[3];
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;

  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;

  exr_header->num_channels = static_cast<int>(info.channels.size());

  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(),
              255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    for (size_t i = 0; i < info.attributes.size(); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }

  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      std::stringstream ss;
      ss << "Invalid data width or data height: " << data_width << ", "
         << data_height << std::endl;
      (*err) += ss.str();
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if ((data_width > threshold) || (data_height > threshold)) {
      if (err) {
        std::stringstream ss;
        ss << "data_width or data_height too large. ";
        ss << "data_width: " << data_width << ", data_height: " << data_height
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

    int err_code = TINYEXR_SUCCESS;

#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
    std::vector<std::thread> workers;
    std::atomic<size_t> tile_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_tiles)) {
      num_threads = int(num_tiles);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        size_t tile_idx = 0;
        while ((tile_idx = tile_count++) < num_tiles) {
#else
    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels, exr_header->requested_pixel_types,
          exr_header->tile_size_x, exr_header->tile_size_y);

      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      if (offsets[tile_idx] + sizeof(int) * 5 > size) {
        // TODO(LTE): atomic
        if (err) {
          (*err) += "Insufficient data size.\n";
        }
        err_code = TINYEXR_ERROR_INVALID_DATA;
        break;
      }

      size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);

      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

      // @todo{ LoD }
      if (tile_coordinates[2] != 0) {
        err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
        break;
      }
      if (tile_coordinates[3] != 0) {
        err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
        break;
      }

      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

      if (data_len < 4 || size_t(data_len) > data_size) {
        // TODO(LTE): atomic
        if (err) {
          (*err) += "Insufficient data length.\n";
        }
        err_code = TINYEXR_ERROR_INVALID_DATA;
        break;
      }

      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;

      bool ret = tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height, tile_coordinates[0],
          tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
          static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);

      if (!ret) {
        // TODO(LTE): atomic
        if (err) {
          (*err) += "Failed to decode tile data.\n";
        }
        err_code = TINYEXR_ERROR_INVALID_DATA;
      }

      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }  // num_thread loop

    for (auto &t : workers) {
      t.join();
    }
#else
    }
#endif

    if (err_code != TINYEXR_SUCCESS) {
      return err_code;
    }

    exr_image->num_tiles = static_cast<int>(num_tiles);
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
      size_t y_idx = static_cast<size_t>(y);

      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Too large value. Assume this is invalid.
          // 2**20 = 1048576 = heuristic value.
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example
          // `data_len < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
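          // Example (hypothetical numbers): with ZIP compression
          // (num_scanline_blocks = 16) and data_window[3] = 9, a block
          // starting at line_no = 0 decodes min(0 + 16, 9 + 1) - 0 = 10
          // scanlines.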
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window[3] + 1));

          int num_lines = end_line_no - line_no;

          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y

            // overflow check
            tinyexr_int64 lno =
                static_cast<tinyexr_int64>(line_no) -
                static_cast<tinyexr_int64>(exr_header->data_window[1]);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else {
              line_no -= exr_header->data_window[1];
            }

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }

#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }

  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
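    // Each scan-line chunk is laid out as a 4-byte y coordinate, a 4-byte
    // data length, then the (possibly compressed) pixel payload, which is
    // why the walk below advances by data_len + 8 bytes per chunk.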
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_width++;

  int data_height = exr_header->data_window[3] - exr_header->data_window[1];
  if (data_height >= std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_height++;

  if ((data_width < 0) || (data_height < 0)) {
    tinyexr::SetErrorMessage("data width or data height is negative.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if (data_width > threshold) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > threshold) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  size_t num_blocks = 0;

  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD }
    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);

    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    // Issue #81
    if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
      tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct them.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for
  // details.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //   stringstream ss;
      //   ss << "Incomplete lineOffsets." << std::endl;
      //   (*err) += ss.str();
      // }
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK
        break;
      } else {
        tinyexr::SetErrorMessage(
            "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

#if 1
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }

    return ret;
  }
}

static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  // Naive implementation
  // Group channels by layers
  // go over all channel names, split by periods
  // collect unique names
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string full_name(exr_header.channels[c].name);
    const size_t pos = full_name.find_last_of('.');
    if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
      full_name.erase(pos);
      if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
          layer_names.end())
        layer_names.push_back(full_name);
    }
  }
}

struct LayerChannel {
  explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  size_t index;
  std::string name;
};

static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      const size_t pos = ch_name.find(layer_name + '.');
      if (pos == std::string::npos) continue;
      if (pos == 0) {
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }
    LayerChannel ch(size_t(c), ch_name);
    channels.push_back(ch);
  }
}

}  // namespace tinyexr

int EXRLayers(const char *filename, const char **layer_names[],
              int *num_layers, const char **err) {
  EXRVersion exr_version;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }

  std::vector<std::string> layer_vec;
  tinyexr::GetLayers(exr_header, layer_vec);

  (*num_layers) = int(layer_vec.size());
  (*layer_names) = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
  for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
    (*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
    (*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
  }

  FreeEXRHeader(&exr_header);
  return TINYEXR_SUCCESS;
}

int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  return LoadEXRWithLayer(out_rgba, width, height, filename,
                          /* layername */ NULL, err);
}

int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // RGBA
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;

  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);

  std::vector<tinyexr::LayerChannel> channels;
  tinyexr::ChannelsInLayer(
"" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

int IsEXR(const char *filename) {
  EXRVersion exr_version;

  int ret = ParseEXRVersionFromFile(&exr_version, filename);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  return TINYEXR_SUCCESS;
}

int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}

int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Read HALF channel as FLOAT.
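  // (Setting requested_pixel_types[] to FLOAT makes DecodePixelData perform
  // the HALF -> FLOAT conversion while decoding, so `out_rgba` always
  // receives 32-bit floats.)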
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // RGBA
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only.

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.

    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize < 16) {
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}

int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
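  // (header_len, as computed by ParseEXRHeader(), does not include the 8-byte
  // magic + version prefix, so the chunk offset table starts at
  // memory + 8 + header_len.)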
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
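// Added note: each WriteAttributeToMemory() call below serializes one
// attribute in the standard OpenEXR wire form:
//   name ('\0'-terminated)  type ('\0'-terminated)  size (int32, LE)  value
// e.g. the compression attribute becomes "compression\0compression\0"
// followed by 01 00 00 00 and a single compression-code byte.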
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += 
sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // TOOD(LTE): C++11 thread // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( 
static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } size_t totalSize = static_cast<size_t>(offset); { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } if (memory.size() == 0) { tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char *>(malloc(totalSize)); memcpy((*memory_out), &memory.at(0), memory.size()); unsigned char *memory_ptr = *memory_out + memory.size(); for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { memcpy(memory_ptr, 
&data_list[i].at(0), data_list[i].size()); memory_ptr += data_list[i].size(); } return totalSize; // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on (0x800) // must be [2, 8, 0, 0] (deep bit set in byte 1) if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
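// Added note: the offset table that follows the header is one little-endian
// int64 per chunk (num_blocks entries here), each holding the absolute byte
// offset of that chunk from the start of the file; the loop below reads and
// byte-swaps each entry.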
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if 
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // Move to the next header. marker += info.header_len; size -= info.header_len; } // Allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header.
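// Added note: the flag bits tested below live in byte 1 of the version word,
// i.e. bits 9-12 of the 32-bit little-endian value:
//   0x02 tiled, 0x04 long names, 0x08 non-image (deep), 0x10 multipart.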
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May be OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // Compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In a multipart image, there is a 'part number' before each chunk's data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // The EXR spec says 'part number' is 'unsigned long', but it is actually an // 'unsigned int' (4 bytes) in the OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check that 'part number' is identical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
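/* Added usage sketch (illustrative, not part of tinyexr): exercises two of
   the public entry points defined above, SaveEXR() and
   ParseEXRVersionFromFile(). Guarded by `#if 0` since it is an illustration
   only; it assumes tinyexr.h is in scope, and the output filename "out.exr"
   is arbitrary. */
#if 0
#include <cstdio>
#include <vector>

static int example_save_and_probe(void) {
  const int w = 4, h = 4;
  // 4x4 mid-gray RGB image, interleaved RGBRGB...
  std::vector<float> rgb(static_cast<size_t>(w * h * 3), 0.5f);
  const char *err = NULL;
  // components = 3, save_as_fp16 = 1 -> pixels stored as half floats.
  int ret = SaveEXR(rgb.data(), w, h, 3, 1, "out.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    fprintf(stderr, "SaveEXR failed: %s\n", err ? err : "(unknown)");
    FreeEXRErrorMessage(err);
    return ret;
  }
  EXRVersion version;
  ret = ParseEXRVersionFromFile(&version, "out.exr");
  if (ret == TINYEXR_SUCCESS) {
    printf("version %d, tiled=%d, multipart=%d\n", version.version,
           version.tiled, version.multipart);
  }
  return ret;
}
#endif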
alignment.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* Original code from the Application Kernel Matrix by Cray */ /* that was based on the ClustalW application */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <libgen.h> #include "param.h" #include "sequence.h" #include "alignment.h" #include "bots.h" int ktup, window, signif; int prot_ktup, prot_window, prot_signif; int gap_pos1, gap_pos2, mat_avscore; int nseqs, max_aa; #define MAX_ALN_LENGTH 5000 int *seqlen_array, def_aa_xref[NUMRES+1]; int *bench_output, *seq_output; double gap_open, gap_extend; double prot_gap_open, prot_gap_extend; double pw_go_penalty, pw_ge_penalty; double prot_pw_go_penalty, prot_pw_ge_penalty; char **args, **names, **seq_array; int matrix[NUMRES][NUMRES]; double gap_open_scale; double gap_extend_scale; // dnaFlag default value is false int dnaFlag = FALSE; // clustalw default value is false int clustalw = FALSE; #define INT_SCALE 100 #define MIN(a,b) ((a)<(b)?(a):(b)) #define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k)) #define tegap(k) ((k) <= 0 ? 
0 : te + gh * (k)) /*********************************************************************** * : **********************************************************************/ void del(int k, int *print_ptr, int *last_print, int *displ) { if (*last_print<0) *last_print = displ[(*print_ptr)-1] -= k; else *last_print = displ[(*print_ptr)++] = -k; } /*********************************************************************** * : **********************************************************************/ void add(int v, int *print_ptr, int *last_print, int *displ) { if (*last_print < 0) { displ[(*print_ptr)-1] = v; displ[(*print_ptr)++] = *last_print; } else { *last_print = displ[(*print_ptr)++] = v; } } /*********************************************************************** * : **********************************************************************/ int calc_score(int iat, int jat, int v1, int v2, int seq1, int seq2) { int i, j, ipos, jpos; ipos = v1 + iat; jpos = v2 + jat; i = seq_array[seq1][ipos]; j = seq_array[seq2][jpos]; return (matrix[i][j]); } /*********************************************************************** * : **********************************************************************/ int get_matrix(int *matptr, int *xref, int scale) { int gg_score = 0; int gr_score = 0; int i, j, k, ti, tj, ix; int av1, av2, av3, min, max, maxres; for (i = 0; i <= max_aa; i++) for (j = 0; j <= max_aa; j++) matrix[i][j] = 0; ix = 0; maxres = 0; for (i = 0; i <= max_aa; i++) { ti = xref[i]; for (j = 0; j <= i; j++) { tj = xref[j]; if ((ti != -1) && (tj != -1)) { k = matptr[ix]; if (ti == tj) { matrix[ti][ti] = k * scale; maxres++; } else { matrix[ti][tj] = k * scale; matrix[tj][ti] = k * scale; } ix++; } } } maxres--; av1 = av2 = av3 = 0; for (i = 0; i <= max_aa; i++) { for (j = 0; j <= i; j++) { av1 += matrix[i][j]; if (i == j) av2 += matrix[i][j]; else av3 += matrix[i][j]; } } av1 /= (maxres*maxres)/2; av2 /= maxres; av3 /= (int) (((double)(maxres*maxres-maxres))/2); mat_avscore = -av3; min = max = matrix[0][0]; for (i = 0; i <= max_aa; i++) for (j = 1; j <= i; j++) { if (matrix[i][j] < min) min = matrix[i][j]; if (matrix[i][j] > max) max = matrix[i][j]; } for (i = 0; i < gap_pos1; i++) { matrix[i][gap_pos1] = gr_score; matrix[gap_pos1][i] = gr_score; matrix[i][gap_pos2] = gr_score; matrix[gap_pos2][i] = gr_score; } matrix[gap_pos1][gap_pos1] = gg_score; matrix[gap_pos2][gap_pos2] = gg_score; matrix[gap_pos2][gap_pos1] = gg_score; matrix[gap_pos1][gap_pos2] = gg_score; maxres += 2; return(maxres); } /*********************************************************************** * : **********************************************************************/ void forward_pass(char *ia, char *ib, int n, int m, int *se1, int *se2, int *maxscore, int g, int gh) { int i, j, f, p, t, hh; int HH[MAX_ALN_LENGTH]; int DD[MAX_ALN_LENGTH]; *maxscore = 0; *se1 = *se2 = 0; for (i = 0; i <= m; i++) {HH[i] = 0; DD[i] = -g;} for (i = 1; i <= n; i++) { hh = p = 0; f = -g; for (j = 1; j <= m; j++) { f -= gh; t = hh - g - gh; if (f < t) f = t; DD[j] -= gh; t = HH[j] - g - gh; if (DD[j] < t) DD[j] = t; hh = p + matrix[(int)ia[i]][(int)ib[j]]; if (hh < f) hh = f; if (hh < DD[j]) hh = DD[j]; if (hh < 0) hh = 0; p = HH[j]; HH[j] = hh; if (hh > *maxscore) {*maxscore = hh; *se1 = i; *se2 = j;} } } } /*********************************************************************** * : **********************************************************************/ void reverse_pass(char *ia, char *ib, int se1, int se2, int *sb1, int *sb2, int maxscore, int g, int 
gh) { int i, j, f, p, t, hh, cost; int HH[MAX_ALN_LENGTH]; int DD[MAX_ALN_LENGTH]; cost = 0; *sb1 = *sb2 = 1; for (i = se2; i > 0; i--){ HH[i] = -1; DD[i] = -1;} for (i = se1; i > 0; i--) { hh = f = -1; if (i == se1) p = 0; else p = -1; for (j = se2; j > 0; j--) { f -= gh; t = hh - g - gh; if (f < t) f = t; DD[j] -= gh; t = HH[j] - g - gh; if (DD[j] < t) DD[j] = t; hh = p + matrix[(int)ia[i]][(int)ib[j]]; if (hh < f) hh = f; if (hh < DD[j]) hh = DD[j]; p = HH[j]; HH[j] = hh; if (hh > cost) { cost = hh; *sb1 = i; *sb2 = j; if (cost >= maxscore) break; } } if (cost >= maxscore) break; } } /*********************************************************************** * : **********************************************************************/ int diff (int A, int B, int M, int N, int tb, int te, int *print_ptr, int *last_print, int *displ, int seq1, int seq2, int g, int gh) { int i, j, f, e, s, t, hh; int midi, midj, midh, type; int HH[MAX_ALN_LENGTH]; int DD[MAX_ALN_LENGTH]; int RR[MAX_ALN_LENGTH]; int SS[MAX_ALN_LENGTH]; if (N <= 0) {if (M > 0) del(M, print_ptr, last_print, displ); return( - (int) tbgap(M)); } if (M <= 1) { if (M <= 0) {add(N, print_ptr, last_print, displ); return( - (int)tbgap(N));} midh = -(tb+gh) - tegap(N); hh = -(te+gh) - tbgap(N); if (hh > midh) midh = hh; midj = 0; for (j = 1; j <= N; j++) { hh = calc_score(1,j,A,B,seq1,seq2) - tegap(N-j) - tbgap(j-1); if (hh > midh) {midh = hh; midj = j;} } if (midj == 0) { del(1, print_ptr, last_print, displ); add(N, print_ptr, last_print, displ); } else { if (midj > 1) add(midj-1, print_ptr, last_print, displ); displ[(*print_ptr)++] = *last_print = 0; if (midj < N) add(N-midj, print_ptr, last_print, displ); } return midh; } midi = M / 2; HH[0] = 0.0; t = -tb; for (j = 1; j <= N; j++) { HH[j] = t = t - gh; DD[j] = t - g; } t = -tb; for (i = 1; i <= midi; i++) { s = HH[0]; HH[0] = hh = t = t - gh; f = t - g; for (j = 1; j <= N; j++) { if ((hh = hh - g - gh) > (f = f - gh)) f = hh; if ((hh = HH[j] - g - gh) > (e = DD[j]- gh)) e = hh; hh = s + calc_score(i,j,A,B,seq1,seq2); if (f > hh) hh = f; if (e > hh) hh = e; s = HH[j]; HH[j] = hh; DD[j] = e; } } DD[0] = HH[0]; RR[N] = 0; t = -te; for (j = N-1; j >= 0; j--) {RR[j] = t = t - gh; SS[j] = t - g;} t = -te; for (i = M - 1; i >= midi; i--) { s = RR[N]; RR[N] = hh = t = t-gh; f = t - g; for (j = N - 1; j >= 0; j--) { if ((hh = hh - g - gh) > (f = f - gh)) f = hh; if ((hh = RR[j] - g - gh) > (e = SS[j] - gh)) e = hh; hh = s + calc_score(i+1,j+1,A,B,seq1,seq2); if (f > hh) hh = f; if (e > hh) hh = e; s = RR[j]; RR[j] = hh; SS[j] = e; } } SS[N] = RR[N]; midh = HH[0] + RR[0]; midj = 0; type = 1; for (j = 0; j <= N; j++) { hh = HH[j] + RR[j]; if (hh >= midh) if (hh > midh || (HH[j] != DD[j] && RR[j] == SS[j])) {midh = hh; midj = j;} } for (j = N; j >= 0; j--) { hh = DD[j] + SS[j] + g; if (hh > midh) {midh = hh;midj = j;type = 2;} } if (type == 1) { diff(A, B, midi, midj, tb, g, print_ptr, last_print, displ, seq1, seq2, g, gh); diff(A+midi, B+midj, M-midi, N-midj, g, te, print_ptr, last_print, displ, seq1, seq2, g, gh); } else { diff(A, B, midi-1, midj, tb, 0.0, print_ptr, last_print, displ, seq1, seq2, g, gh); del(2, print_ptr, last_print, displ); diff(A+midi+1, B+midj, M-midi-1, N-midj, 0.0, te, print_ptr, last_print, displ, seq1, seq2, g, gh); } return midh; } /*********************************************************************** * : **********************************************************************/ double tracepath(int tsb1, int tsb2, int *print_ptr, int *displ, int seq1, int seq2) { int 
i, k; int i1 = tsb1; int i2 = tsb2; int pos = 0; int count = 0; for (i = 1; i <= *print_ptr - 1; ++i) { if (displ[i]==0) { char c1 = seq_array[seq1][i1]; char c2 = seq_array[seq2][i2]; if ((c1!=gap_pos1) && (c1 != gap_pos2) && (c1 == c2)) count++; ++i1; ++i2; ++pos; } else if ((k = displ[i]) > 0) { i2 += k; pos += k; } else { i1 -= k; pos -= k; } } return (100.0 * (double) count); } int pairalign() { int i, n, m, si, sj; int len1, len2, maxres; double gg, mm_score; int *mat_xref, *matptr; matptr = gon250mt; mat_xref = def_aa_xref; maxres = get_matrix(matptr, mat_xref, 10); if (maxres == 0) return(-1); bots_message("Start aligning "); #pragma omp parallel { #pragma omp single private(i,n,si,sj,len1,m) for (si = 0; si < nseqs; si++) { n = seqlen_array[si+1]; for (i = 1, len1 = 0; i <= n; i++) { char c = seq_array[si+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len1++; } for (sj = si + 1; sj < nseqs; sj++) { m = seqlen_array[sj+1]; if ( n == 0 || m == 0 ) { bench_output[si*nseqs+sj] = (int) 1.0; } else { #pragma omp task untied \ private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \ shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore) { int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh; int displ[2*MAX_ALN_LENGTH+1]; int print_ptr, last_print; for (i = 1, len2 = 0; i <= m; i++) { char c = seq_array[sj+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len2++; } if ( dnaFlag == TRUE ) { g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend } else { gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value g = (int) ((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend } seq1 = si + 1; seq2 = sj + 1; forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh); reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh); print_ptr = 1; last_print = 0; diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh); mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2); if (len1 == 0 || len2 == 0) mm_score = 0.0; else mm_score /= (double) MIN(len1,len2); bench_output[si*nseqs+sj] = (int) mm_score; } // end task } // end if (n == 0 || m == 0) } // for (j) } // end parallel for (i) } // end parallel bots_message(" completed!\n"); return 0; } int pairalign_seq() { int i, n, m, si, sj; int len1, len2, maxres; double gg, mm_score; int *mat_xref, *matptr; matptr = gon250mt; mat_xref = def_aa_xref; maxres = get_matrix(matptr, mat_xref, 10); if (maxres == 0) return(-1); for (si = 0; si < nseqs; si++) { n = seqlen_array[si+1]; for (i = 1, len1 = 0; i <= n; i++) { char c = seq_array[si+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len1++; } for (sj = si + 1; sj < nseqs; sj++) { m = seqlen_array[sj+1]; if ( n == 0 || m == 0 ) { seq_output[si*nseqs+sj] = (int) 1.0; } else { int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh; int displ[2*MAX_ALN_LENGTH+1]; int print_ptr, last_print; for (i = 1, len2 = 0; i <= m; i++) { char c = seq_array[sj+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len2++; } if ( dnaFlag == TRUE ) { g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend } else { gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value g = (int) 
((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend } seq1 = si + 1; seq2 = sj + 1; forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh); reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh); print_ptr = 1; last_print = 0; diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh); mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2); if (len1 == 0 || len2 == 0) mm_score = 0.0; else mm_score /= (double) MIN(len1,len2); seq_output[si*nseqs+sj] = (int) mm_score; } } } return 0; } /*********************************************************************** * : **********************************************************************/ void init_matrix(void) { int i, j; char c1, c2; gap_pos1 = NUMRES - 2; gap_pos2 = NUMRES - 1; max_aa = strlen(amino_acid_codes) - 2; for (i = 0; i < NUMRES; i++) def_aa_xref[i] = -1; for (i = 0; (c1 = amino_acid_order[i]); i++) for (j = 0; (c2 = amino_acid_codes[j]); j++) if (c1 == c2) {def_aa_xref[i] = j; break;} } void pairalign_init (char *filename) { int i; if (!filename || !filename[0]) { bots_error(0, "Please specify an input file with the -f option\n"); } init_matrix(); nseqs = readseqs(filename); bots_message("Multiple Pairwise Alignment (%d sequences)\n",nseqs); for (i = 1; i <= nseqs; i++) bots_debug("Sequence %d: %s %6.d aa\n", i, names[i], seqlen_array[i]); if ( clustalw == TRUE ) { gap_open_scale = 0.6667; gap_extend_scale = 0.751; } else { gap_open_scale = 1.0; gap_extend_scale = 1.0; } if ( dnaFlag == TRUE ) { // Using DNA parameters ktup = 2; window = 4; signif = 4; gap_open = 15.00; gap_extend = 6.66; pw_go_penalty = 15.00; pw_ge_penalty = 6.66; } else { // Using protein parameters ktup = 1; window = 5; signif = 5; gap_open = 10.0; gap_extend = 0.2; pw_go_penalty = 10.0; pw_ge_penalty = 0.1; } } void align_init () { int i,j; bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs); for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) bench_output[i*nseqs+j] = 0; } void align() { pairalign(); } void align_seq_init () { int i,j; seq_output = (int *) malloc(sizeof(int)*nseqs*nseqs); bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs); for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) seq_output[i*nseqs+j] = 0; } void align_seq() { pairalign_seq(); } void align_end () { int i,j; for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) if (bench_output[i*nseqs+j] != 0) bots_debug("Benchmark sequences (%d:%d) Aligned. Score: %d\n", i+1 , j+1 , (int) bench_output[i*nseqs+j]); } int align_verify () { int i,j; int result = BOTS_RESULT_SUCCESSFUL; for(i = 0; i<nseqs; i++) { for(j = 0; j<nseqs; j++) { if (bench_output[i*nseqs+j] != seq_output[i*nseqs+j]) { bots_message("Error: Optimized prot. (%3d:%3d)=%5d Sequential prot. (%3d:%3d)=%5d\n", i+1, j+1, (int) bench_output[i*nseqs+j], i+1, j+1, (int) seq_output[i*nseqs+j]); result = BOTS_RESULT_UNSUCCESSFUL; } } } return result; }
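/* Added illustration (not part of the BOTS benchmark): forward_pass() above
   is the forward sweep of a local (Smith-Waterman) alignment with affine gap
   penalties, g to open and gh to extend a gap, computed in O(m) space with
   two rolling arrays: HH[j] holds the best score of an alignment ending at
   column j, and DD[j] the best score ending in a vertical gap. Below is a
   minimal standalone restatement of that recurrence over plain C strings,
   with a fixed match/mismatch score standing in for the substitution
   matrix. */
#if 0
#include <string.h>

static int toy_affine_local_score(const char *s1, const char *s2,
                                  int g, int gh) {
  enum { MATCH = 2, MISMATCH = -1, MAXLEN = 1024 };
  int HH[MAXLEN + 1], DD[MAXLEN + 1];
  int n = (int) strlen(s1), m = (int) strlen(s2);
  int best = 0;
  if (n > MAXLEN || m > MAXLEN) return -1;
  for (int j = 0; j <= m; j++) { HH[j] = 0; DD[j] = -g; }
  for (int i = 1; i <= n; i++) {
    int p = 0, hh = 0, f = -g;
    for (int j = 1; j <= m; j++) {
      f -= gh;                                /* extend a horizontal gap */
      if (hh - g - gh > f) f = hh - g - gh;   /* ... or open a new one   */
      DD[j] -= gh;                            /* extend a vertical gap   */
      if (HH[j] - g - gh > DD[j]) DD[j] = HH[j] - g - gh;
      hh = p + (s1[i - 1] == s2[j - 1] ? MATCH : MISMATCH); /* diagonal  */
      if (hh < f) hh = f;
      if (hh < DD[j]) hh = DD[j];
      if (hh < 0) hh = 0;                     /* local-alignment floor   */
      p = HH[j];                  /* keep previous-row HH[j] for diagonal */
      HH[j] = hh;
      if (hh > best) best = hh;
    }
  }
  return best;
}
#endif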
greensconvolution_fast_c.c
#ifndef __OPENCL_VERSION__ /* only for non-opencl */ #include <stdint.h> #include <math.h> #include <stdio.h> #include <assert.h> #include "greensconvolution_fast_c.h" #define OPENCL_GLOBAL #define OPENCL_KERNEL #define CONSTGLOBAL static const #define USE_OPENMP #define GLOBAL_NULL NULL #else /* __OPENCL_VERSION__ */ #ifndef NULL #define NULL ((void *)0L) #endif #define GLOBAL_NULL ((__global void *)0L) #define CONSTGLOBAL __constant #define OPENCL_GLOBAL __global #define OPENCL_KERNEL __kernel #endif /* __OPENCL_VERSION__ */ #ifndef TRUE #define TRUE (!0) #endif #ifndef FALSE #define FALSE (0) #endif // Following the logic at the top of imagesources_curved.c, // the curved case is identical to the flat case, // using the same leading coefficients. // The differences are: // The empirically determined curvature-dependent coefficient. // Predicted_T *= 1.0/(1.0 + tc*coeffs + dc*coeff2 + tc2*coeffs**2.0 + dc2*coeff2**2 + tcdc*coeffs*coeffs + tc3*coeffs**3.0 + tc2dc*coeffs**2.0*coeff2) // // and // Predicted_T[:,concave_in_x] *= np.exp( -(a/(4.0*alphaz*Pred_t[:,:,0])*(1.0 + source_depth*Pred_kx[:,concave_in_x])*Pred_x[:,concave_in_x]**2)) // or // Predicted_T[:,~concave_in_x] *= np.exp( -(a/(4.0*alphaz*Pred_t[:,:,0])*(1.0 + source_depth/((1.0/Pred_kx[:,~concave_in_x])+source_depth ))*Pred_x[:,~concave_in_x]**2)) // (to analyze the above factors, we use the second, and also roll in the exp(-z^2/4alphat) // Where a=alphaz/alphaxy // coeff2 is source_depth*curvature // Pred_x is rvec // .... replaces exp(-x^2/(4*alphaxy*t) in the uncurved version /* Noncurved function: integral from greensfcn_doc.tex (leading coefficient divided by z^3v^2) times integral from 0 to 1 of: doublereal funct_(doublereal *x, doublereal *v, doublereal *a) { return pow(*x,-1.5f)*pow(1.0f-(*x),-1.5f)*exp(-(1.0f+(*a)*(*x))/((*v)*(*x)*(1.0f-(*x)))); } ... How to apply the differences? * The empirically determined curvature-dependent coefficient. has z dependence and curvature dependence but is outside of the integral. * The exponential factor is part of the integrand and needs to be converted to the dimensionless variables for efficient computation. To do this, roll in the exp(-z^2/(4alphaz t) ) factor ... 
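(Notation summary, inferred from the code below rather than quoted from greensfcn_doc: v = 4*alphaz*t/z^2 is the dimensionless measurement time (vval in the code), u is the corresponding dimensionless integration time with x = u/v, and cedilla = r_conductivityscaled/z is the dimensionless scaled radius (cval in the code).)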
substitute a, substitute t-tau for Pred_t, substitute z for source_depth // Predicted_T[:,~concave_in_x] *= np.exp( -z^2/(4alphaz (t-tau)) -(1/(4.0*alphaxy*(t-tau)))*(1.0 + z/((1.0/curvature) + z))*Pred_x[:,~concave_in_x]**2) // substitute (t-tau)=(v-u) * (z^2/(4alphaz)), // Predicted_T[:,~concave_in_x] *= np.exp( -z^2/(4alphaz (v-u)*z^2/4alphaz) -(1/(4.0*alphaxy*(v-u)*(z^2/(4alphaz)))*(1.0 + z/((1.0/curvature) + z ))*Pred_x[:,~concave_in_x]**2)) // Predicted_T[:,~concave_in_x] *= np.exp( -1/(v-u) -(alphaz/(alphaxy*(v-u)*(z^2)))*(1.0 + z/((1.0/curvature) + z ))*Pred_x[:,~concave_in_x]**2)) // Predicted_T[:,~concave_in_x] *= np.exp( -1/(v-u) -(alphaz/alphaxy)*(1/(v-u))*(1/z^2)*(1.0 + z/((1.0/curvature) + z ))*Pred_x[:,~concave_in_x]**2)) // In this context, Pred_x represents the in-plane measurement distance // so cedilla^2 = (pred_x^2(alphaz/alphaxy) + z^2)/z^2 // so cedilla^2z^2 - z^2= pred_x^2(alphaz/alphaxy) // pred_x^2 = (cedilla^2 - 1)*z^2 * alphaxy/alphaz // Predicted_T[:,~concave_in_x] *= np.exp( -1/(v-u) -(alphaz/alphaxy)*(1/(v-u))*(1/z^2)*(1.0 + z/((1.0/curvature) + z ))*(cedilla^2-1) * z^2 * (alphaxy/alphaz) )) // Cancel alphaz/alphaxy and z^2 // Predicted_T[:,~concave_in_x] *= np.exp( -1/(v-u) -(1/(v-u))*(1.0 + z/((1.0/curvature) + z ))*(cedilla^2-1) )) // Predicted_T[:,~concave_in_x] *= np.exp( -1/(v-u) -(1/(v-u))*(1.0 + z*curvature/(1.0 + z*curvature ))*(cedilla^2-1) )) // Predicted_T[:,~concave_in_x] *= np.exp( -(1/(v-u))(1 + (1.0 + z*curvature/(1.0 + z*curvature ))*(cedilla^2-1) )) // Predicted_T[:,~concave_in_x] *= np.exp( -(1/(v-u))(1 + (cedilla^2-1 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1) ))) // Predicted_T[:,~concave_in_x] *= np.exp( -(1/(v-u))(cedilla^2 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1))) // This replaces the np.exp( -(cedilla^2/(v-u))) factor in the flat case // Add in rest of exponential factor from greensfcn_doc.pdf (i.e. exp(-1/u)) // Predicted_T[:,~concave_in_x] *= np.exp(-(1/u) -(1/(v-u))(cedilla^2 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1))) // ******* // Substitute x=u/v as in the bottom of greensfcn_doc.pdf // Predicted_T[:,~concave_in_x] *= np.exp(-(1/xv) -(1/(v-xv))(cedilla^2 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1))) // Common denominator // Predicted_T[:,~concave_in_x] *= np.exp(-((1 - x + x(cedilla^2 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1)))/(xv(1-x)))) // let a = (cedilla^2 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1))-1 # Not to be confused with the a=alphaz/alphaxy used above // Predicted_T[:,~concave_in_x] *= np.exp(-(1 +ax)/(xv(1-x))) // ... Which is the same as the regular Greensconvolution except for the redefinition of a!!!!! // Simplify a: // a = cedilla^2 + (z*curvature/(1.0 + z*curvature))*cedilla^2 -(z*curvature/(1.0 + z*curvature)) -1 # Not to be confused with the a=alphaz/alphaxy used above // a = cedilla^2(1 + (z*curvature/(1.0 + z*curvature))) - (z*curvature/(1.0 + z*curvature)) -1 # Not to be confused with the a=alphaz/alphaxy used above // let w = (z*curvature/(1.0+z*curvature)) // a = cedilla^2(1+w) - (1+w) // a = (cedilla^2-1)(1+w) // NOTE: the z*curvature in the denominator of w is only present in the convex case! // The implementation below is in terms of cedilla and v, // not cedilla and a. // Continuing above derivation from line marked as ******** // Predicted_T[:,~concave_in_x] *= np.exp(-(1/u) -(1/(v-u))(cedilla^2 + (z*curvature/(1.0 + z*curvature))*(cedilla^2-1))) // let w = (z*curvature/(1.0+z*curvature)) ( convex case, i.e. 
curvature < 0 ) // or w = (z*curvature) (concave case, i.e. curvature > 0) // Predicted_T[:,~concave_in_x] *= np.exp(-(1/u) -(1/(v-u))(cedilla^2 + w*(cedilla^2-1))) // So what had been just cedilla^2 in the flat formula is now (cedilla^2 + w*(cedilla^2-1)) // or curved_cedilla^2 = flat_cedilla^2 + w*(flat_cedilla^2-1) // or curved_cedilla^2 = flat_cedilla^2 + w*flat_cedilla^2-w) // or curved_cedilla^2 = flat_cedilla^2(1+w) - w) // or curved_cedilla^2 = flat_cedilla^2(1+w) +1 -1 - w) // or curved_cedilla^2 = flat_cedilla^2(1+w) +1 -(1+w) // or curved_cedilla^2 = (flat_cedilla^2-1)(1+w) + 1 // or curved_cedilla^2 = ((r_conductivityscaled/z)^2-1)(1+w) +1 // in the convex case (negative curvature) as the denominator of w i.e. (1+z*curvature) gets too small // (or becomes zero or negative in the extreme case), what that is really doing // is making the factor (1+w) approach zero. To avoid numerical trouble and non-physical // situations, we just force (1+w) to be positive or zero. */ // Coefficients for curved case , from eval_curved_laminate.py 10/25/16 02:32pm // THESE PROBABLY NEED TO BE RECALCULATED FOR THE SIMPLIFIED LAMINATE // OR DETERMINED FROM THEORY!!! /* CONSTGLOBAL float tc=0.40211414; CONSTGLOBAL float dc=-0.00423593; CONSTGLOBAL float tc2= -0.03041781; CONSTGLOBAL float dc2=0.0027296; CONSTGLOBAL float tcdc=-0.05028247; CONSTGLOBAL float tc3=-0.00087789; CONSTGLOBAL float tc2dc=0.00723961; */ static inline float inner_sin_sq(float x) { if (fabs(x) < M_PI/4.0f) { return pow(sin(x),2.0f); } else { return 0.25 + pow(fabs(x)-((float)M_PI)/4.0f+0.5f,2.0f); } } static inline void greensconvolution_integrate_anisotropic_c_one(uint64_t itercnt, OPENCL_GLOBAL const float *vrange,uint64_t nvrange, //number of rows in integraleval OPENCL_GLOBAL const float *crange,uint64_t ncrange, // number of cols in integraleval OPENCL_GLOBAL const float *integraleval, OPENCL_GLOBAL const float *integral_dintegranddveval, // same size as integraleval OPENCL_GLOBAL const float *integral_dintegranddceval, // same size as integraleval OPENCL_GLOBAL const float *zvec, OPENCL_GLOBAL const uint64_t *zvecshape,OPENCL_GLOBAL const uint64_t *zvecstrides, OPENCL_GLOBAL const float *xvec, OPENCL_GLOBAL const uint64_t *xvecshape,OPENCL_GLOBAL const uint64_t *xvecstrides, OPENCL_GLOBAL const float *tvec, OPENCL_GLOBAL const uint64_t *tvecshape,OPENCL_GLOBAL const uint64_t *tvecstrides, float yval, OPENCL_GLOBAL const float *curvaturevec, OPENCL_GLOBAL const uint64_t *curvaturevecshape, OPENCL_GLOBAL const uint64_t *curvaturevecstrides, OPENCL_GLOBAL const uint64_t *sumstrides, OPENCL_GLOBAL const uint64_t *shape, OPENCL_GLOBAL volatile float *result,OPENCL_GLOBAL const uint64_t *resultstrides, float coeff, OPENCL_GLOBAL const uint64_t *axissumflag, uint64_t ndim, float log10v0,float log10c0,float dlog10v,float dlog10c,float alphaz,float alphaxy,int8_t curvature_flag) { uint64_t zpos,tpos,xpos,curvaturepos,resultpos,sumcnt,sumpos; uint64_t zposbase,tposbase,xposbase,curvatureposbase; float sum=0.0f; int loopdone=FALSE; float vval,cval; uint64_t vidx,vidx2,cidx,cidx2; uint64_t point_vidx[4],point_cidx[4]; float vidxval,cidxval; float integralevalpt, integral_dintegranddvevalpt,integral_dintegranddcevalpt; float vals[4],weights[4],totalweight; float est,scalarresult; //float r_conductivityscaled_sq_ov_z_sq,coeffs,coeff2,one_plus_w; float curvcoeff; int pointcnt; int64_t axiscnt2,axispos; resultpos=itercnt; zposbase=0; tposbase=0; xposbase=0; curvatureposbase=0; //fprintf(stderr,"itercnt=%d\n",(int)itercnt); for 
(axiscnt2=0;axiscnt2 < ndim;axiscnt2++) {
  if (!axissumflag[axiscnt2]) {
    /* not summing over this axis */
    axispos = resultpos/resultstrides[axiscnt2];
    resultpos -= axispos*resultstrides[axiscnt2];
    if (zvecshape[axiscnt2] > 1) {
      /* not broadcasting z over this axis */
      zposbase += axispos*zvecstrides[axiscnt2];
    }
    if (xvecshape[axiscnt2] > 1) {
      /* not broadcasting r over this axis */
      xposbase += axispos*xvecstrides[axiscnt2];
    }
    if (tvecshape[axiscnt2] > 1) {
      /* not broadcasting t over this axis */
      tposbase += axispos*tvecstrides[axiscnt2];
    }
    if (curvature_flag && curvaturevecshape[axiscnt2] > 1) {
      /* not broadcasting curvature over this axis */
      curvatureposbase += axispos*curvaturevecstrides[axiscnt2];
    }
  }
}

for (sumcnt=0;!loopdone;sumcnt++) {
  //fprintf(stderr,"sumcnt=%d\n",(int)sumcnt);
  zpos=zposbase;
  tpos=tposbase;
  xpos=xposbase;
  curvaturepos=curvatureposbase;
  sumpos=sumcnt;
  for (axiscnt2=0;axiscnt2 < ndim;axiscnt2++) {
    if (axissumflag[axiscnt2]) {
      axispos = sumpos/sumstrides[axiscnt2];
      sumpos -= axispos*sumstrides[axiscnt2];
      if (axispos >= shape[axiscnt2]) {
        loopdone=TRUE;
        break;
      }
      //fprintf(stderr,"iterationstrides[%lu]=%lu\n",axiscnt2,iterationstrides[axiscnt2]);
      /* summing over this axis */
      if (zvecshape[axiscnt2] > 1) {
        /* not broadcasting z over this axis */
        zpos += axispos*zvecstrides[axiscnt2];
      }
      if (xvecshape[axiscnt2] > 1) {
        /* not broadcasting r over this axis */
        xpos += axispos*xvecstrides[axiscnt2];
      }
      if (tvecshape[axiscnt2] > 1) {
        /* not broadcasting t over this axis */
        tpos += axispos*tvecstrides[axiscnt2];
      }
      if (curvature_flag && curvaturevecshape[axiscnt2] > 1) {
        /* not broadcasting curvature over this axis */
        curvaturepos += axispos*curvaturevecstrides[axiscnt2];
      }
    }
  }
  if (sumpos > 0 || loopdone) break;

  //fprintf(stderr,"zpos=%d\n",(int)zpos);
  //fprintf(stderr,"xpos=%d\n",(int)xpos);
  //fprintf(stderr,"tpos=%d\n",(int)tpos);
  assert(tvec[tpos] > 0);
  vval=(4*alphaz*tvec[tpos])/pow(zvec[zpos],2.0f);

  // Flat case:
  if (!curvature_flag) {
    cval=fabs(sqrt(pow(xvec[xpos],2.0f)*(alphaz/alphaxy) + pow(yval,2.0f)*(alphaz/alphaxy) + pow(zvec[zpos],2.0f))/zvec[zpos]);
    //cval=fabs(rconductivityscaledvec[xpos]/zvec[zpos]);
    curvcoeff = 1.0f;
  } else {
    // curved case: see bottom of greensfcn_doc.pdf for definition of w_root_v
    float w_root_v = (curvaturevec[curvaturepos]*zvec[zpos]*sqrt(M_PI)/8.0f)*sqrt(vval);
    // bounds on w_root_v (empirical)
    if (w_root_v < -0.6f) {
      w_root_v = -0.6f;
    }
    if (w_root_v > 1.0f) {
      w_root_v = 1.0f;
    }
    curvcoeff = (1.0f/(1.0f + 0.8f*w_root_v));

    if (curvaturevec[curvaturepos] >= 0) {
      // Concave
      // cval from greensfcn_doc.pdf eq. 38
      float theta = curvaturevec[curvaturepos]*xvec[xpos];
      if (fabs(theta) > M_PI/2.0f) {
        theta=M_PI/2.0f;
      }
      float deceleration = pow(1.0f+(1.0f/12.0f)*pow(theta,2.0f),2.0f*(1.0f - xvec[xpos]/(fabs(xvec[xpos])*fabs(theta/2.0f + pow(theta,3.0f)/24.0f))));
      if (deceleration < 1.0f) {
        deceleration = 1.0f;
      }
      // Value for cedilla
      cval=sqrt(pow(xvec[xpos],2.0f)*(1.0f+zvec[zpos]*curvaturevec[curvaturepos])*deceleration*(alphaz/alphaxy) + pow(yval,2.0f)*(alphaz/alphaxy) + pow(zvec[zpos],2.0f))/zvec[zpos];
    } else {
      // Convex
      // cval from greensfcn_doc.pdf eq. 38
      cval=sqrt( pow((1.0f/fabs(curvaturevec[curvaturepos]))-zvec[zpos],2.0f)*(1.0f + fabs(curvaturevec[curvaturepos])*zvec[zpos]/(1.0f - fabs(curvaturevec[curvaturepos])*zvec[zpos]))*4.0f*inner_sin_sq(fabs(curvaturevec[curvaturepos])*xvec[xpos]/2.0f)*(alphaz/alphaxy) + pow(yval,2.0f)*(alphaz/alphaxy) + pow(zvec[zpos],2.0f))/zvec[zpos];
    }
    /* Old code:
    coeffs=curvaturevec[zpos]*sqrt(alphaz*tvec[tpos]);
    coeff2=curvaturevec[zpos]*zvec[zpos];
    one_plus_w=1.0+coeff2;
    // concave case
    if (curvaturevec[zpos] < 0) {
      // Need to bound 1+w >= 0
      // where w is coeff2/(1+coeff2), coeff2 negative
      // and (1+coeff2) should not be negative or zero
      // in the bound, we force 1+w to 0
      if (1.0f+coeff2 <= 0.0) {
        one_plus_w=0.0; // lateral flow is instantaneous
      } else {
        // constraint is that 1+w positive, i.e.
        // w >= -1
        // coeff2/(1+coeff2) >= -1, where 1+coeff2 > 0
        // coeff2 >= -(1+coeff2)
        // 2*coeff2 >= -1
        // coeff2 >= -0.5
        if (coeff2 >= -0.5) {
          one_plus_w = 1+coeff2/(1+coeff2); // convex case
        } else {
          one_plus_w=0.0;
        }
      }
    }
    r_conductivityscaled_sq_ov_z_sq=pow(rconductivityscaledvec[rpos]/zvec[zpos],2.0f);
    cval=sqrt((r_conductivityscaled_sq_ov_z_sq-1.0f)*one_plus_w + 1);
    // Leading factor scaling according to curvature empirically fitted coefficients
    coeff *= 1.0/(1.0 + tc*coeffs + dc*coeff2 + tc2*pow(coeffs,2.0f) + dc2*pow(coeff2,2.0f) + tcdc*coeffs*coeffs + tc3*pow(coeffs,3.0f) + tc2dc*pow(coeffs,2.0f)*coeff2);
    */
  }

  // print("%f, %f, %f" % (log10(vval),log10v0,dlog10v))
  vidx=(int64_t)((log10(vval)-log10v0)/dlog10v);
  // print("vidx=%d; nvrange=%d" % (vidx,nvrange))
  assert(vidx >= 0 && vidx+1 < nvrange);
  vidx2=vidx+1;

  cidx=(int64_t)((log10(cval)-log10c0)/dlog10c);
  // print("cidx=%d; ncrange=%d" % (cidx,ncrange))
  assert(cidx >= 0 && cidx+1 < ncrange);
  cidx2=cidx+1;

  point_vidx[0]=vidx;
  point_cidx[0]=cidx;
  point_vidx[1]=vidx;
  point_cidx[1]=cidx2;
  point_vidx[2]=vidx2;
  point_cidx[2]=cidx;
  point_vidx[3]=vidx2;
  point_cidx[3]=cidx2;

  totalweight=0.0f;
  for (pointcnt=0;pointcnt < 4;pointcnt++) {
    vidxval=vrange[point_vidx[pointcnt]];
    cidxval=crange[point_cidx[pointcnt]];
    integralevalpt=integraleval[point_vidx[pointcnt]*ncrange + point_cidx[pointcnt]];
    integral_dintegranddvevalpt=integral_dintegranddveval[point_vidx[pointcnt]*ncrange + point_cidx[pointcnt]];
    integral_dintegranddcevalpt=integral_dintegranddceval[point_vidx[pointcnt]*ncrange + point_cidx[pointcnt]];
    vals[pointcnt] = integralevalpt + (vval-vidxval)*integral_dintegranddvevalpt +(cval-cidxval)*integral_dintegranddcevalpt;
    weights[pointcnt]=sqrt((float)(1.0f/(0.001f+(vval-vidxval)*(vval-vidxval) + (cval-cidxval)*(cval-cidxval))));
    totalweight+=weights[pointcnt];
  }

  est=0.0f;
  for (pointcnt=0;pointcnt < 4;pointcnt++) {
    est+=vals[pointcnt]*weights[pointcnt]/totalweight;
  }

  // Limit according to nonnegative and upper bound in greensfcn_doc.tex
  if (est < 0.0f) {
    // print("Warning: Integral gave inaccurate calculation of %g at v=%g, c=%g; lower bound of 0 used instead" % (est,vval,cval),file=sys.stderr)
    est=0.0f;
  } else if (est > 0.185f*exp((float)(-(pow(cval,2.0f)-1.0f)/vval))) {
#ifndef __OPENCL_VERSION__
    fprintf(stderr,"Warning: Integral gave inaccurate calculation of %g at v=%g,c=%g; upper bound of %g used instead\n",est,vval,cval,0.185f*exp((float)(-(pow(cval,2.0f)-1.0f)/vval)));
#endif /* __OPENCL_VERSION__ */
    est= 0.185f*exp((float)(-(pow(cval,2.0f)-1.0f)/vval));
  }

  scalarresult=coeff*curvcoeff*est/pow(zvec[zpos],3.0f);
  sum+=scalarresult;
}

//#ifdef USE_OPENMP
//#pragma omp atomic // or #pragma omp atomic update
//#endif /* USE_OPENMP */
result[itercnt]=sum; //# This
assignment and increment must be atomic } void greensconvolution_integrate_anisotropic_c( OPENCL_GLOBAL const float *vrange,uint64_t nvrange, //number of rows in integraleval OPENCL_GLOBAL const float *crange,uint64_t ncrange, // number of cols in integraleval OPENCL_GLOBAL const float *integraleval, OPENCL_GLOBAL const float *integral_dintegranddveval, // same size as integraleval OPENCL_GLOBAL const float *integral_dintegranddceval, // same size as integraleval OPENCL_GLOBAL const float *zvec, OPENCL_GLOBAL const uint64_t *zvecshape,OPENCL_GLOBAL const uint64_t *zvecstrides, OPENCL_GLOBAL const float *xvec, OPENCL_GLOBAL const uint64_t *xvecshape,OPENCL_GLOBAL const uint64_t *xvecstrides, OPENCL_GLOBAL const float *tvec, OPENCL_GLOBAL const uint64_t *tvecshape,OPENCL_GLOBAL const uint64_t *tvecstrides, float yval, OPENCL_GLOBAL const float *curvaturevec, OPENCL_GLOBAL const uint64_t *curvaturevecshape, OPENCL_GLOBAL const uint64_t *curvaturevecstrides, OPENCL_GLOBAL const uint64_t *sumstrides, OPENCL_GLOBAL const uint64_t *shape, float alphaz,float alphaxy,int8_t curvature_flag, OPENCL_GLOBAL float *result,OPENCL_GLOBAL const uint64_t *resultstrides, float coeff, OPENCL_GLOBAL const uint64_t *axissumflag, uint64_t ndim) { // NOTE: This is supposed to be the same code as in greensconvolution_fast.pyx/greensconvolution_integrate_anisotropic_py float log10v0,log10c0,dlog10v,dlog10c; uint64_t iterlen; int64_t itercnt; uint64_t axiscnt; iterlen=1; for (axiscnt=0;axiscnt < ndim;axiscnt++) { if (!axissumflag[axiscnt]) { iterlen *= shape[axiscnt]; } } assert(nvrange > 0); assert(ncrange > 0); log10v0=log10(vrange[0]); log10c0=log10(crange[0]); dlog10v=log10(vrange[1])-log10(vrange[0]); dlog10c=log10(crange[1])-log10(crange[0]); #ifdef USE_OPENMP #pragma omp parallel for shared(tvec,zvec,xvec,result,vrange,crange,integraleval,integral_dintegranddveval,integral_dintegranddceval,alphaz,alphaxy,curvature_flag,log10v0,log10c0,dlog10v,dlog10c,nvrange,ncrange,coeff,ndim,resultstrides,tvecshape,xvecshape,zvecshape,tvecstrides,xvecstrides,zvecstrides,yval,curvaturevec,curvaturevecshape,curvaturevecstrides,sumstrides,shape,iterlen,axissumflag) default(none) private(itercnt) #endif /* USE_OPENMP */ for (itercnt=0; itercnt < iterlen; itercnt++) { greensconvolution_integrate_anisotropic_c_one(itercnt, vrange,nvrange, //number of rows in integraleval crange,ncrange, // number of cols in integraleval integraleval, integral_dintegranddveval, // same size as integraleval integral_dintegranddceval, // same size as integraleval zvec, zvecshape,zvecstrides, xvec, xvecshape,xvecstrides, tvec, tvecshape,tvecstrides, yval, curvaturevec,curvaturevecshape,curvaturevecstrides, sumstrides, shape, result,resultstrides, coeff, axissumflag, ndim, log10v0,log10c0,dlog10v,dlog10c,alphaz,alphaxy,curvature_flag); } } #ifdef __OPENCL_VERSION__ OPENCL_KERNEL void greensconvolution_integrate_anisotropic_c_opencl(OPENCL_GLOBAL const float *vrange,uint64_t nvrange, //number of rows in integraleval OPENCL_GLOBAL const float *crange,uint64_t ncrange, // number of cols in integraleval OPENCL_GLOBAL const float *integraleval, OPENCL_GLOBAL const float *integral_dintegranddveval, // same size as integraleval OPENCL_GLOBAL const float *integral_dintegranddceval, // same size as integraleval OPENCL_GLOBAL const float *zvec, OPENCL_GLOBAL const uint64_t *zvecshape,OPENCL_GLOBAL const uint64_t *zvecstrides, OPENCL_GLOBAL const float *xvec, OPENCL_GLOBAL const uint64_t *xvecshape,OPENCL_GLOBAL const uint64_t *xvecstrides, OPENCL_GLOBAL const 
float *tvec, OPENCL_GLOBAL const uint64_t *tvecshape,OPENCL_GLOBAL const uint64_t *tvecstrides, float yval, OPENCL_GLOBAL const uint64_t *sumstrides,OPENCL_GLOBAL const uint64_t *shape, float alphaz,float alphaxy, OPENCL_GLOBAL float *result,OPENCL_GLOBAL const uint64_t *resultstrides, float coeff, OPENCL_GLOBAL const uint64_t *axissumflag, uint64_t ndim) { uint64_t itercnt=get_global_id(0); float log10v0,log10c0,dlog10v,dlog10c; //alphaz=kz*1.0f/(rho*cp); //alphaxyz=pow((kx*ky*kz),(1.0f/3.0f))/(rho*cp); //coeff*=2.0f*pow(alphaz,(3.0f/2.0f))/((rho*cp*M_PI*M_PI)*pow(alphaxyz,(3.0f/2.0f))); log10v0=log10(vrange[0]); log10c0=log10(crange[0]); dlog10v=log10(vrange[1])-log10(vrange[0]); dlog10c=log10(crange[1])-log10(crange[0]); greensconvolution_integrate_anisotropic_c_one(itercnt, vrange,nvrange, //number of rows in integraleval crange,ncrange, // number of cols in integraleval integraleval, integral_dintegranddveval, // same size as integraleval integral_dintegranddceval, // same size as integraleval zvec, zvecshape,zvecstrides, xvec, xvecshape,xvecstrides, tvec, tvecshape,tvecstrides, yval, GLOBAL_NULL,GLOBAL_NULL,GLOBAL_NULL, sumstrides, shape, result,resultstrides, coeff, axissumflag, ndim, log10v0,log10c0,dlog10v,dlog10c,alphaz,alphaxy,FALSE); } OPENCL_KERNEL void greensconvolution_integrate_anisotropic_curved_c_opencl(OPENCL_GLOBAL const float *vrange,uint64_t nvrange, //number of rows in integraleval OPENCL_GLOBAL const float *crange,uint64_t ncrange, // number of cols in integraleval OPENCL_GLOBAL const float *integraleval, OPENCL_GLOBAL const float *integral_dintegranddveval, // same size as integraleval OPENCL_GLOBAL const float *integral_dintegranddceval, // same size as integraleval OPENCL_GLOBAL const float *zvec, OPENCL_GLOBAL const uint64_t *zvecshape,OPENCL_GLOBAL const uint64_t *zvecstrides, OPENCL_GLOBAL const float *xvec, OPENCL_GLOBAL const uint64_t *xvecshape,OPENCL_GLOBAL const uint64_t *xvecstrides, OPENCL_GLOBAL const float *tvec, OPENCL_GLOBAL const uint64_t *tvecshape,OPENCL_GLOBAL const uint64_t *tvecstrides, float yval, OPENCL_GLOBAL const float *curvaturevec, OPENCL_GLOBAL const uint64_t *curvaturevecshape, OPENCL_GLOBAL const uint64_t *curvaturevecstrides, OPENCL_GLOBAL const uint64_t *sumstrides,OPENCL_GLOBAL const uint64_t *shape, float alphaz,float alphaxy, OPENCL_GLOBAL float *result,OPENCL_GLOBAL const uint64_t *resultstrides, float coeff, OPENCL_GLOBAL const uint64_t *axissumflag, uint64_t ndim) { uint64_t itercnt=get_global_id(0); float log10v0,log10c0,dlog10v,dlog10c; //alphaz=kz*1.0f/(rho*cp); //alphaxyz=pow((kx*ky*kz),(1.0f/3.0f))/(rho*cp); //coeff*=2.0f*pow(alphaz,(3.0f/2.0f))/((rho*cp*M_PI*M_PI)*pow(alphaxyz,(3.0f/2.0f))); log10v0=log10(vrange[0]); log10c0=log10(crange[0]); dlog10v=log10(vrange[1])-log10(vrange[0]); dlog10c=log10(crange[1])-log10(crange[0]); greensconvolution_integrate_anisotropic_c_one(itercnt, vrange,nvrange, //number of rows in integraleval crange,ncrange, // number of cols in integraleval integraleval, integral_dintegranddveval, // same size as integraleval integral_dintegranddceval, // same size as integraleval zvec, zvecshape,zvecstrides, xvec, xvecshape,xvecstrides, tvec, tvecshape,tvecstrides, yval, curvaturevec,curvaturevecshape,curvaturevecstrides, sumstrides, shape, result,resultstrides, coeff, axissumflag, ndim, log10v0,log10c0,dlog10v,dlog10c,alphaz,alphaxy,TRUE); } #endif /* __OPENCL_VERSION__ */
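/*
 * Minimal sketch (not part of the library) of the table-lookup scheme used by
 * greensconvolution_integrate_anisotropic_c_one() above: the integral I(v,c) is
 * tabulated on a log-spaced (v,c) grid together with dI/dv and dI/dc; each
 * query takes the four surrounding grid points, extends each with a
 * first-order Taylor step, and blends them with regularized inverse-distance
 * weights. Array and parameter names here are hypothetical stand-ins for
 * vrange/crange/integraleval and friends.
 */
#include <math.h>
#include <stdint.h>

float lookup_integral(float v, float c,
                      const float *vgrid, const float *cgrid,
                      const float *I, const float *dIdv, const float *dIdc,
                      int64_t nv, int64_t nc,
                      float log10v0, float log10c0,
                      float dlog10v, float dlog10c)
{
    /* locate the lower-left cell corner on the log-spaced grid */
    int64_t vi = (int64_t)((log10f(v) - log10v0) / dlog10v);
    int64_t ci = (int64_t)((log10f(c) - log10c0) / dlog10c);
    if (vi < 0 || vi + 1 >= nv || ci < 0 || ci + 1 >= nc)
        return 0.0f; /* out of table (the library code asserts instead) */

    float est = 0.0f, totalweight = 0.0f;
    for (int corner = 0; corner < 4; corner++) {
        /* corners in the same order as point_vidx/point_cidx above:
           (vi,ci), (vi,ci+1), (vi+1,ci), (vi+1,ci+1) */
        int64_t pv = vi + (corner >> 1);
        int64_t pc = ci + (corner & 1);
        int64_t idx = pv * nc + pc;
        /* first-order Taylor extension from this corner to the query point */
        float val = I[idx] + (v - vgrid[pv]) * dIdv[idx] + (c - cgrid[pc]) * dIdc[idx];
        /* inverse-distance weight, regularized so a coincident corner stays finite */
        float w = sqrtf(1.0f / (0.001f + (v - vgrid[pv]) * (v - vgrid[pv])
                                       + (c - cgrid[pc]) * (c - cgrid[pc])));
        est += val * w;
        totalweight += w;
    }
    return est / totalweight;
}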
matrix_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file matrix_op-inl.h * \brief Function definition of matrix related operators */ #ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #include <mxnet/operator_util.h> #include <vector> #include <string> #include <algorithm> #include <utility> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "../channel_op_common.h" #include "../mxnet_op.h" #include "broadcast_reduce_op.h" #include "./init_op.h" #include "../../common/static_array.h" #include "./slice-inl.h" #if MXNET_USE_CUDA #include <thrust/device_vector.h> #endif #ifdef __CUDACC__ #include "./pseudo2DTranspose_op-inl.cuh" #endif namespace mxnet { namespace op { struct ReshapeParam : public dmlc::Parameter<ReshapeParam> { mxnet::TShape target_shape; bool keep_highest; mxnet::Tuple<int> shape; bool reverse; DMLC_DECLARE_PARAMETER(ReshapeParam) { DMLC_DECLARE_FIELD(shape) .set_default(mxnet::Tuple<int>()) .describe("The target shape"); DMLC_DECLARE_FIELD(reverse) .set_default(false) .describe("If true then the special values are inferred from right to left"); DMLC_DECLARE_FIELD(target_shape) .set_default(mxnet::TShape(0, -1)) .describe("(Deprecated! Use ``shape`` instead.) " "Target new shape. One and only one dim can be 0, " "in which case it will be inferred from the rest of dims"); DMLC_DECLARE_FIELD(keep_highest).set_default(false) .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged." 
"If set to true, then the first dim in target_shape is ignored," "and always fixed as input"); } bool operator==(const ReshapeParam &other) const { return this->target_shape == other.target_shape && this->keep_highest == other.keep_highest && this->shape == other.shape && this->reverse == other.reverse; } }; template<typename IType> inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape, const mxnet::TShape& dshape, bool reverse) { std::vector<IType> dshape_vec; std::vector<IType> param_shape_vec(shape.begin(), shape.end()); for (int i = 0; i < dshape.ndim(); ++i) { dshape_vec.push_back(dshape[i]); } std::vector<IType> tmp; size_t src_idx = 0; int inf_idx = -1; if (reverse) { std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(param_shape_vec.begin(), param_shape_vec.end()); } auto dshape_len = dshape_vec.size(); auto params_len = param_shape_vec.size(); for (size_t i = 0; i < params_len; ++i) { IType proposed_dim = param_shape_vec[i]; if (proposed_dim == 0) { // keep same CHECK_LT(src_idx, dshape_len); tmp.push_back(dshape_vec[src_idx++]); } else if (proposed_dim == -1) { // infer CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred"; inf_idx = i; tmp.push_back(1); src_idx++; } else if (proposed_dim == -2) { // copy all remaining dims from source while (src_idx < dshape_len) { const int dn = dshape_vec[src_idx++]; tmp.push_back(dn); } } else if (proposed_dim == -3) { // merge two dims from source CHECK_LT(src_idx, dshape_len-1); const int d1 = dshape_vec[src_idx++]; const int d2 = dshape_vec[src_idx++]; if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) { tmp.push_back(-1); } else { tmp.push_back(d1 * d2); } } else if (proposed_dim == -4) { // split the source dim s into two dims // read the left dim and then the right dim (either can be -1) CHECK_LT(i + 2, params_len); CHECK_LT(src_idx, dshape_len); const int d0 = dshape_vec[src_idx++]; IType d1 = param_shape_vec[++i]; IType d2 = param_shape_vec[++i]; CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1."; if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) << "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0; tmp.push_back(d1); tmp.push_back(d2); } else { // greater than 0, new shape tmp.push_back(proposed_dim); src_idx++; } } if (inf_idx >= 0) { if (shape_is_known(dshape)) { IType new_size = 1; for (IType x : tmp) new_size *= x; tmp[inf_idx] = dshape.Size() / new_size; } else { tmp[inf_idx] = -1; } } if (reverse) { std::reverse(param_shape_vec.begin(), param_shape_vec.end()); std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(tmp.begin(), tmp.end()); } mxnet::TShape oshape(tmp.begin(), tmp.end()); return oshape; } inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) { if (shape_is_known(*in) && shape_is_known(out)) { return true; } else if (!shape_is_known(out)) { return false; } else { int zero_axis = -1; int known_dim_size_prod = 1; for (int i = 0; i < in->ndim(); i++) { if (!mxnet::dim_size_is_known(*in, i)) { if (zero_axis != -1) return false; // more than 1 zero found. 
else zero_axis = i; } else { known_dim_size_prod *= (*in)[i]; } } (*in)[zero_axis] = out.Size() / known_dim_size_prod; return true; } } inline bool ReshapeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape &dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape oshape; if (param_.shape.ndim() != 0) { oshape = InferReshapeShape(param_.shape, dshape, param_.reverse); } else if (param_.target_shape.ndim() != -1) { LOG(INFO) << "Using target_shape will be deprecated."; oshape = param_.target_shape; int neg_count = 0; index_t inf_idx = 0; index_t start_idx = param_.keep_highest ? 1 : 0; if (param_.keep_highest) { oshape[0] = dshape[0]; } for (int i = start_idx; i < oshape.ndim(); ++i) { if (oshape[i] == 0) { neg_count++; inf_idx = i; } } if (neg_count == 1) { oshape[inf_idx] = 1; oshape[inf_idx] = dshape.Size() / oshape.Size(); } } else { return shape_is_known((*out_attrs)[0]) && ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]); } ReverseReshapeInferShape(&dshape, oshape); #if 0 CHECK_EQ(oshape.Size(), dshape.Size()) << "Target shape size is different to source. " << "Target: " << oshape << "\nSource: " << dshape; #endif SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]); } inline bool FlattenShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape &dshape = (*in_attrs)[0]; if (!shape_is_known(dshape)) return false; int target_dim = 1; for (int i = 1; i < dshape.ndim(); ++i) { target_dim *= dshape[i]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim)); return true; } struct TransposeParam : public dmlc::Parameter<TransposeParam> { mxnet::TShape axes; DMLC_DECLARE_PARAMETER(TransposeParam) { DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1)) .describe("Target axis order. By default the axes will be inverted."); } bool operator==(const TransposeParam &other) const { return this->axes == other.axes; } }; /*! 
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache * \param in input tensor * \param out output tensor * \param row shape of dim 0 of input * \param col shape of dim 1 of input * \tparam DType Data type * \tparam is_addto */ template<typename DType, bool is_addto> MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) { // ensure cache line hits and prevent cache miss for any configuration // L1 cache size to be utilized = 32kb = 2^15 // Largest size of a single unit of any dtype <= 8 byte = 2^3 // Number of elements - (2^15/2^3) = 2^12 // Block-size - 2^6 v 2^6 (64 v 64) // But we could leverage unrolling of for loops (for parallelization) // Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled // blocksize * blocksize * num_threads = cache_size / dtype_size // Instead of explicit unroll, let compiler figure out optimal unroll factor const index_t blocksize = 32; // collapse 2 parallelizes 2 for loops // inner 2 for loops aren't parallelized to prevent cache miss // Microsoft Visual C++ compiler does not support omp collapse #ifdef _MSC_VER #pragma omp parallel for #else #pragma omp parallel for collapse(2) #endif // _MSC_VER for (index_t i = 0; i < row; i += blocksize) { for (index_t j = 0; j < col; j += blocksize) { // transpose the block for (index_t a = j; (a < blocksize + j) && (a < col); ++a) { for (index_t b = i; (b < blocksize + i) && (b < row); ++b) { if (!is_addto) { out[a * row + b] = in[b * col + a]; } else { out[a * row + b] += in[b * col + a]; } } } } } } inline bool IsIdentityTranspose(const TShape& axes) { for (dim_t i = 0; i < axes.ndim(); i++) { if (axes[i] != i) return false; } return true; } template<typename xpu, bool is_addto = false> void TransposeImpl(RunContext ctx, const TBlob& src, const TBlob& ret, const mxnet::TShape& axes) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(src.type_flag_, ret.type_flag_); // zero-size tensor, no need to compute if (src.shape_.Size() == 0U) return; Stream<xpu> *s = ctx.get_stream<xpu>(); #ifdef __CUDACC__ // This transpose can be used only if there exist n and m such that: // params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1) // Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3). if (isPseudo2DTranspose(axes)) { MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { transpose_pseudo2D<DType, is_addto>(ret, src, axes, s); }); return; } #endif // Special handle the identity case if (IsIdentityTranspose(axes)) { MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s); Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s); if (!is_addto) { // Use memcpy to accelerate the speed Copy(out, in, s); } else { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch( s, ret.Size(), out.dptr_, in.dptr_); } }); return; } // Handle the general transpose case MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { switch (axes.ndim()) { case 2: { Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s); Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s); if (ctx.get_ctx().dev_mask() == cpu::kDevMask) { Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]); } else { LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case " "in GPU has been covered by transpose_pseudo2D." 
" Report an issue in Github."; } break; } case 3: { Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s); Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s); if (!is_addto) { out = transpose(in, axes.get<3>()); } else { out += transpose(in, axes.get<3>()); } break; } case 4: { Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s); if (!is_addto) { out = transpose(in, axes.get<4>()); } else { out += transpose(in, axes.get<4>()); } break; } case 5: { Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s); Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s); if (!is_addto) { out = transpose(in, axes.get<5>()); } else { out += transpose(in, axes.get<5>()); } break; } case 6: { Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s); Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s); if (!is_addto) { out = transpose(in, axes.get<6>()); } else { out += transpose(in, axes.get<6>()); } break; } default: LOG(FATAL) << "Transpose support at most 6 dimensions"; break; } }); } // matrix transpose template<typename xpu> void Transpose(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { if (req[0] == kNullOp) { return; } const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed); CHECK(req[0] == kWriteTo || req[0] == kAddTo) << "Transpose only supports kNullOp, kWriteTo and kAddTo"; mxnet::TShape axes; if (param.axes.ndim() == 0) { axes = mxnet::TShape(inputs[0].ndim(), -1); for (int i = 0; i < axes.ndim(); ++i) { axes[i] = axes.ndim() - 1 - i; } } else { axes = common::CanonicalizeAxes(param.axes); } if (req[0] == kAddTo) { TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes); } else { TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes); } } inline bool TransposeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& shp = (*in_attrs)[0]; mxnet::TShape& out_shp = (*out_attrs)[0]; if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp)) return false; // none of the shapes is known CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions"; if (out_shp.ndim() >= 0 && shp.ndim() >= 0) CHECK_EQ(out_shp.ndim(), shp.ndim()); mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1); mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1); if (param.axes.ndim() == 0) { for (int i = 0; i < shp.ndim(); ++i) { ret[i] = shp[shp.ndim()-1-i]; } for (int i = 0; i < out_shp.ndim(); ++i) { get[shp.ndim()-1-i] = out_shp[i]; } } else { CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim()); for (int i = 0; i < shp.ndim(); ++i) { CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim())); ret[i] = shp[param.axes[i]]; } for (int i = 0; i < out_shp.ndim(); ++i) { get[param.axes[i]] = out_shp[i]; } } SHAPE_ASSIGN_CHECK(*in_attrs, 0, get); SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); return shape_is_known(ret); } struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> { int axis; DMLC_DECLARE_PARAMETER(ExpandDimParam) { DMLC_DECLARE_FIELD(axis) .describe("Position where new axis is to be inserted. 
Suppose that " "the input `NDArray`'s dimension is `ndim`, the range of " "the inserted axis is `[-ndim, ndim]`"); } bool operator==(const ExpandDimParam &other) const { return this->axis == other.axis; } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s; axis_s << axis; (*dict)["axis"] = axis_s.str(); } }; inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& ishape = (*in_attrs)[0]; mxnet::TShape& oshape = (*out_attrs)[0]; if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) { return false; } int indim = ishape.ndim(); bool unknown_ishape = false; if (-1 == indim) { indim = oshape.ndim() - 1; unknown_ishape = true; } int axis = param.axis; if (axis < 0) { axis += indim + 1; } CHECK(axis >= 0 && axis <= indim) << "axis must be in the range [" << -indim << ", " << indim << "] (" << param.axis << " provided)"; mxnet::TShape ret(indim + 1, -1); for (int i = 0; i < axis; ++i) { ret[i] = (unknown_ishape? -1 : ishape[i]); } ret[axis] = 1; for (int i = axis+1; i < indim+1; ++i) { ret[i] = (unknown_ishape? -1 : ishape[i-1]); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); ret = mxnet::TShape(indim, -1); for (int i = 0; i < axis; ++i) ret[i] = oshape[i]; for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i]; SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret); return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0)); } // Currently MKLDNN only supports step = 1 or step has no value inline bool SupportMKLDNNSlice(const SliceParam& param) { if (param.step.ndim() == 0U) return true; for (int i = 0; i < param.step.ndim(); ++i) { if (param.step[i].has_value() && param.step[i].value() != 1) return false; } return true; } inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); const auto& in_stype = in_attrs->at(0); auto& out_stype = out_attrs->at(0); bool dispatched = false; const auto dispatch_ex = DispatchMode::kFComputeEx; // If step = 1, no need to fallback; otherwise fallback to dense bool trivial_step = false; if (param.step.ndim() == 0U) { trivial_step = true; } else if (param.step.ndim() == 1U && (!param.step[0].has_value() || param.step[0].value() == 1)) { trivial_step = true; } if (in_stype == kDefaultStorage) { #if MXNET_USE_MKLDNN == 1 if (dev_mask == Context::kCPU && MKLDNNEnvSet() && SupportMKLDNNSlice(param)) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex); } #endif if (!dispatched) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } } if (!dispatched && in_stype == kCSRStorage && trivial_step) { dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } // slice the indptr of a csr struct SliceCsrIndPtr { template<typename IType> MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) { KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base); } }; /* * a wrapper to launch SliceCsrIndPtr kernel. * slice [src[begin] .. 
src[end]) and store in dst[0, end - begin) */ template<typename xpu, typename IType> void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx, const IType* src, IType* dst) { using namespace mshadow; using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); int indptr_len = end - begin + 1; Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin); } /* * Slice a CSR NDArray for first dimension */ template<typename xpu> void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx, const NDArray &in, const NDArray &out) { using namespace mshadow; using namespace mxnet_op; using namespace csr; nnvm::dim_t begin_row = begin[0]; nnvm::dim_t end_row = end[0]; nnvm::dim_t indptr_len = end_row - begin_row + 1; out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len)); // assume idx indptr share the same type MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, { MSHADOW_TYPE_SWITCH(in.dtype(), DType, { RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>(); RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>(); SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr); Stream<xpu> *s = ctx.get_stream<xpu>(); RType nnz = 0; mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)), Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s)); // return csr zeros if nnz = 0 if (nnz == 0) { out.set_aux_shape(kIdx, Shape1(0)); return; } // copy indices and values out.CheckAndAllocAuxData(kIdx, Shape1(nnz)); out.CheckAndAllocData(Shape1(nnz)); IType* in_idx = in.aux_data(kIdx).dptr<IType>(); IType* out_idx = out.aux_data(kIdx).dptr<IType>(); DType* in_data = in.data().dptr<DType>(); DType* out_data = out.data().dptr<DType>(); RType offset = 0; mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)), Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s)); mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s), Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s); mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s), Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s); }); }); }); } /*! * \brief slice a CSRNDArray for two dimensions */ struct SliceDimTwoCsrAssign { /*! 
* \brief This function slices a CSRNDArray on axis one between begin_col and end_col * \param i loop index * \param out_idx output csr ndarray column indices * \param out_data output csr ndarray data * \param out_indptr output csr ndarray row index pointer * \param in_idx input csr ndarray column indices * \param in_data input csr ndarray data * \param in_indptr input csr ndarray row index pointer * \param begin_col begin column indice * \param end_col end column indice */ template<typename IType, typename RType, typename DType> MSHADOW_XINLINE static void Map(int i, IType* out_idx, DType* out_data, const RType* out_indptr, const IType* in_idx, const DType* in_data, const RType* in_indptr, const int begin_col, const int end_col) { RType ind = out_indptr[i]; for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) { // indices of CSRNDArray are in ascending order per row if (in_idx[j] >= end_col) { break; } else if (in_idx[j] >= begin_col) { out_idx[ind] = in_idx[j] - begin_col; out_data[ind] = in_data[j]; ind++; } } } }; /* * Slice a CSR NDArray for two dimensions */ template<typename xpu> void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx, const NDArray &in, const NDArray &out); template<typename xpu> void SliceCsrImpl(const SliceParam &param, const OpContext& ctx, const NDArray &in, OpReqType req, const NDArray &out) { if (req == kNullOp) return; CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported"; CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported"; const mxnet::TShape ishape = in.shape(); const mxnet::TShape oshape = out.shape(); int N = ishape.ndim(); mxnet::TShape begin(N, -1), end(N, -1); for (int i = 0; i < N; ++i) { int s = 0; if (i < param.begin.ndim() && param.begin[i]) { s = *param.begin[i]; if (s < 0) s += ishape[i]; } begin[i] = s; end[i] = s + oshape[i]; } switch (N) { case 1: { SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out); break; } case 2: { SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out); break; } default: LOG(FATAL) << "CSR is only for 2-D shape"; break; } } template<typename xpu> void SliceEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { CHECK_EQ(inputs.size(), 1); CHECK_EQ(outputs.size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); auto in_stype = inputs[0].storage_type(); if (in_stype == kCSRStorage) { SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]); } else { LOG(FATAL) << "Slice not implemented for storage type" << in_stype; } } template<int ndim> inline bool GetIndexRange(const mxnet::TShape& dshape, const mxnet::Tuple<dmlc::optional<index_t>>& param_begin, const mxnet::Tuple<dmlc::optional<index_t>>& param_end, const mxnet::Tuple<dmlc::optional<index_t>>& param_step, common::StaticArray<index_t, ndim>* begin, common::StaticArray<index_t, ndim>* end, common::StaticArray<index_t, ndim>* step) { // Function returns false if output is zero-sized, true otherwise. 
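// (As implemented, the value returned below is zero_size_shape: it becomes
// true when some sliced axis normalizes to begin == end, i.e. the output is
// zero-sized, and stays false otherwise; callers such as SliceAssignOpForward
// skip the kernel launch when it is true.)
// Worked example for one axis of length 5 with begin = -3, end = None,
// step = -1: b = -3 + 5 = 2, e = -1, s = -1, so SetSliceOpOutputDimSize yields
// (b - e - 1) / (-s) + 1 = (2 + 1 - 1) / 1 + 1 = 3 output elements (2, 1, 0).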
bool zero_size_shape = false; CHECK_NE(dshape.ndim(), 0U); CHECK_LE(param_begin.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_LE(param_end.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_EQ(param_begin.ndim(), param_end.ndim()) << "begin and end must have the same length"; CHECK_EQ(ndim, dshape.ndim()) << "Static array size=" << ndim << " is not equal to data shape ndim=" << dshape.ndim(); if (param_step.ndim() > 0) { CHECK_EQ(param_step.ndim(), param_begin.ndim()) << "step and begin must have the same length"; } for (int i = 0; i < param_begin.ndim(); ++i) { index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1; CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0"; index_t b = 0, e = 0; const index_t len = dshape[i]; if (len > 0) { b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0); e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len); if (b < 0) { b += len; } if (e < 0 && param_end[i].has_value()) { e += len; } // move the begin and end to correct position for calculating dim size b = (b < 0 && s > 0) ? 0 : b; b = (b > len - 1 && s < 0) ? len - 1 : b; // if the start value lead to empty tensor under step s, use -1 for indication b = (b < 0 || b > len - 1) ? -1 : b; e = e > -1 ? e : -1; e = e > len ? len : e; } else if (len == 0) { b = 0; e = 0; } (*begin)[i] = b; (*end)[i] = e; (*step)[i] = s; // checking begin==end if (b == e) { zero_size_shape = true; } } for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) { (*begin)[i] = 0; (*end)[i] = dshape[i]; (*step)[i] = 1; } return zero_size_shape; } inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape, const index_t i, const index_t b, const index_t e, const index_t s, mxnet::TShape* oshape) { if (!mxnet::dim_size_is_known(dshape, i)) { (*oshape)[i] = -1; return; } if (e != b && b >= 0) { if (s > 0) { (*oshape)[i] = e > b ? (e - b - 1) / s + 1 : 0; } else { (*oshape)[i] = e < b ? 
(b - e - 1) / (-s) + 1 : 0; } } else { (*oshape)[i] = 0; } } inline bool SliceOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0"; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); mxnet::TShape oshape = dshape; MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const index_t b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape); } }) SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(dshape) && shape_is_known(oshape); } template<int ndim, int req, typename xpu> struct slice_forward; template<int ndim, int req> struct slice_forward<ndim, req, gpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, const mshadow::Shape<ndim> dshape, const mshadow::Shape<ndim> oshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = dshape[ndim-1]; const index_t out_last_dim_size = oshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; const index_t j = i % out_last_dim_size; index_t irow = 0; // row id of flattend 2D data index_t stride = 1; index_t idx = i / out_last_dim_size; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % oshape[k]) * step[k] + begin[k]); idx /= oshape[k]; stride *= dshape[k]; } KERNEL_ASSIGN(out[i], req, data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]); } }; template<int ndim, int req> struct slice_forward<ndim, req, cpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, const mshadow::Shape<ndim> dshape, const mshadow::Shape<ndim> oshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = dshape[ndim-1]; const index_t out_last_dim_size = oshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; index_t out_offset = i * out_last_dim_size; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D data index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % oshape[k]) * step[k] + begin[k]); idx /= oshape[k]; stride *= dshape[k]; } KERNEL_ASSIGN(out[out_offset++], req, data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]); } } }; template<typename xpu> void SliceOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); if (req[0] == kNullOp) return; using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; if (out.Size() == 0) return; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); 
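// Resolve begin/end/step into concrete per-axis indices, then launch the slice
// kernel with one work item per output row on CPU (the kernel body copies the
// whole last dimension itself) or one per output element on GPU.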
MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { size_t num_threads = out.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= out.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), data.dptr<DType>(), data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step); }) }) }) } template<int ndim, int req, typename xpu> struct slice_assign; template<int ndim, int req> struct slice_assign<ndim, req, cpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; index_t offset = i * out_last_dim_size; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[offset++]); } } }; template<int ndim, int req> struct slice_assign<ndim, req, gpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; const index_t j = i % out_last_dim_size; index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i / out_last_dim_size; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[i]); } }; template<typename xpu> void SliceOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); if (req[0] == kNullOp) return; using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); const TBlob& ograd = inputs[0]; const TBlob& igrad = outputs[0]; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); if (req[0] == kWriteTo) { Fill(s, igrad, req[0], 0); } else if (req[0] == kWriteInplace) { LOG(FATAL) << "_slice_backward does not support kWriteInplace"; } if (ograd.Size() == 0) return; MXNET_NDIM_SWITCH(ograd.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step); 
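// The backward pass is a scatter: the slice_assign kernel writes each ograd
// element back to its source position in igrad, reusing the same begin/step
// index arithmetic as the forward slice kernel.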
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 2U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape vshape = dshape; // vshape is the value shape on the right hand side const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const int b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape); } }) SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<typename xpu> void SliceAssignOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 2U); // data[index] = val, data and val are two inputs CHECK_EQ(outputs.size(), 1U); if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& val = inputs[1]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace"; } const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of zero-sized subspace needs no operation. 
} MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = val.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= val.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), val.dptr<DType>(), out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step); }) }) }) } struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> { double scalar; mxnet::Tuple<dmlc::optional<index_t>> begin, end; mxnet::Tuple<dmlc::optional<index_t>> step; DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) { DMLC_DECLARE_FIELD(scalar) .set_default(0) .describe("The scalar value for assignment."); DMLC_DECLARE_FIELD(begin) .describe("starting indices for the slice operation, supports negative indices."); DMLC_DECLARE_FIELD(end) .describe("ending indices for the slice operation, supports negative indices."); DMLC_DECLARE_FIELD(step) .set_default(mxnet::Tuple<dmlc::optional<index_t>>()) .describe("step for the slice operation, supports negative values."); } }; inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!shape_is_known(dshape)) return false; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<int ndim> struct slice_assign_scalar { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val, const OpReqType req, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val); } } }; template<typename xpu> void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); using namespace mshadow; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace"; } mxnet::TShape vshape = data.shape_; const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of 
zero-sized subspace needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      const int b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}

struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;
  index_t begin;
  dmlc::optional<index_t> end;
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
      .describe("The beginning index along the axis to be sliced, "
                "supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
      .describe("The ending index along the axis to be sliced, "
                "supports negative indexes.");
  }
};

inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();
  }
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and not less than zero! Received axis="
    << param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size) << "Invalid end: end=" << *end
                             << " exceeds axis_size=" << axis_size;
    CHECK(*begin < *end) << "Invalid slice range: begin=" << param.begin
                         << ", end=" << param.end;
  } else {
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0) << "Invalid slice range: begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin: begin=" << param.begin;
}

inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape shape(ishape.ndim(), -1);
  for (int i = 0; i < ishape.ndim(); ++i) {
    if (i == axis) {
      shape[i] = static_cast<index_t>(end - begin);
    } else {
      shape[i] = ishape[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return shape_is_known(shape);
}

template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> in = inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> out = outputs[0].FlatTo2D<xpu, DType>(s);
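      // The sliced axis is the innermost one here, so both blobs are viewed as
      // 2-D tensors (all leading dims flattened x last dim) and slice<1> below
      // selects the column range [begin, end).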
ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> in = inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> out = outputs[0].FlatTo3D<xpu, DType>(axis, s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  }
}

// Backward pass of slice_axis: writes ograd back into the sliced region of igrad
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> ograd = inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> igrad = outputs[0].FlatTo2D<xpu, DType>(s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> ograd = inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> igrad = outputs[0].FlatTo3D<xpu, DType>(axis, s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  }
}

struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
    .describe("List of axes on which input data will be sliced according to the "
              "corresponding size of the second input. By default will slice on "
              "all axes. Negative axes are supported.");
  }
};

inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) {
    return false;
  }
  if (param.axes.ndim() == 0) {
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_like performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}

inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}

template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), data.dptr<DType>(), data.shape_.get<ndim>(),
out.shape_.get<ndim>(), begin, step); }) }) }) } template<typename xpu> void SliceLikeBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U); CHECK_EQ(req.size(), 2U); using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); if (req[1] != kNullOp && req[1] != kAddTo) { Fill(s, outputs[1], req[1], 0); // Second input not relavant to gradients. } if (req[0] == kNullOp) return; const TBlob& ograd = inputs[0]; const TBlob& igrad = outputs[0]; const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); if (req[0] == kWriteTo) { Fill(s, igrad, req[0], 0); } else if (req[0] == kWriteInplace) { LOG(FATAL) << "_slice_like_backward does not support kWriteInplace"; } const mxnet::TShape& ishape = ograd.shape_; const mxnet::TShape& from_shape = outputs[1].shape_; mxnet::Tuple<dmlc::optional<index_t>> param_begin; mxnet::Tuple<dmlc::optional<index_t>> param_end; mxnet::Tuple<dmlc::optional<index_t>> param_step; SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step); MXNET_NDIM_SWITCH(ograd.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } struct ClipParam : public dmlc::Parameter<ClipParam> { real_t a_min, a_max; DMLC_DECLARE_PARAMETER(ClipParam) { DMLC_DECLARE_FIELD(a_min) .describe("Minimum value"); DMLC_DECLARE_FIELD(a_max) .describe("Maximum value"); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream a_min_s, a_max_s; a_min_s << a_min; a_max_s << a_max; (*dict)["a_min"] = a_min_s.str(); (*dict)["a_max"] = a_max_s.str(); } }; struct clip { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas, const float a_min, const float a_max) { DType data = datas[i]; if (data > a_max) { out[i] = a_max; } else if (data < a_min) { out[i] = a_min; } else { out[i] = data; } } }; struct clip_grad { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas, const float a_min, const float a_max) { DType data = datas[i]; if (data > a_max) { out[i] = 0; } else if (data < a_min) { out[i] = 0; } else { out[i] = grad[i]; } } }; template<typename xpu> void Clip(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), param.a_min, param.a_max); }); } template<typename xpu> void ClipEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& 
req, const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}

template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                   inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
                                   param.a_min, param.a_max);
  });
}

/*!
 * \brief The parameters of the repeat operator include
 * the number of repetitions and an optional axis.
 * These parameters are later used to deduce the
 * output ndarray shape in the RepeatOpShape() function.
 */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  int repeats = 1;
  dmlc::optional<int> axis;
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats)
      .describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<int>())
      .describe("The axis along which to repeat values."
                " Negative numbers are interpreted as counting from the back."
                " By default, use the flattened input array,"
                " and return a flat output array.");
  }
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream repeats_s, axis_s;
    repeats_s << repeats;
    axis_s << axis;
    (*dict)["repeats"] = repeats_s.str();
    (*dict)["axis"] = axis_s.str();
  }
};

/*!
 * \brief Helper function for getting user input params for the operator repeat.
 * Sanity-checks the user input values.
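 * Example (illustrative): for ishape = (2, 3) and param.axis = -1, the axis is
 * normalized to 1; repeats must be non-negative and the normalized axis must
 * lie in [0, ishape.ndim()).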
*/ inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape, int* repeats, dmlc::optional<int>* axisOpt) { *repeats = param.repeats; CHECK_GE(*repeats, 0) << "repeats cannot be a negative number"; *axisOpt = param.axis; if (static_cast<bool>(*axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt->value(); if (axis < 0) { axis += ndims; } CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds"; } } inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& ishape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(ishape)) { return false; } int repeats = 0; dmlc::optional<int> axisOpt; GetRepeatParams(param, ishape, &repeats, &axisOpt); // If 0 repeats, return an empty 1-dim, 0-size array if (0 == repeats) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0)); return true; } // If repeats > 0, multiply the size of the corresponding axis by repeats if (static_cast<bool>(axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt.value(); if (axis < 0) { axis += ndims; } mxnet::TShape shape(ishape.ndim(), -1); for (int i = 0; i < ishape.ndim(); ++i) { if (i == axis) { shape[i] = repeats * ishape[i]; } else { shape[i] = ishape[i]; } } SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } else { // If axis is not input by user, return a flat 1D array of size = in.size*repeats mxnet::TShape shape(1, ishape.Size() * repeats); SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } return shape_is_known(out_attrs->at(0)); } inline bool RepeatOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if ((*in_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]); } else if ((*out_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]); } return true; } /*! * \brief Reshape the input and output tensors for * using broadcast_to to achieve the funcitonality * of operator repeat. * \return a pair of mxnet::TShape's, first is the reshaped * input shape, second is the reshaped output shape. 
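 * Worked example (illustrative): ishape = (2, 3), axis = 1, repeats = 2 yields
 * rshape = (2, 3, 1) and bshape = (2, 3, 2); broadcasting rshape to bshape and
 * collapsing the last two dims gives the repeated output of shape (2, 6).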
*/ inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp( const mxnet::TShape& ishape, const dmlc::optional<int>& axisOpt, const int repeats) { if (static_cast<bool>(axisOpt)) { int axis = axisOpt.value(); int ndim = ishape.ndim(); if (axis < 0) { axis += ndim; } CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis"; // reshape the input tensor by adding a dim at the (axis+1)-th dim mxnet::TShape rshape(ishape.ndim()+1, 1); // the shape we want to broadcast to mxnet::TShape bshape(rshape.ndim(), 1); int i = 0; while (i <= axis) { rshape[i] = bshape[i] = ishape[i]; ++i; } rshape[i] = 1; bshape[i] = repeats; while (i < ishape.ndim()) { rshape[i+1] = ishape[i]; bshape[i+1] = ishape[i]; ++i; } return std::make_pair(rshape, bshape); } else { // axis is not input by user // reshape the tensor into shape (ishape.Size(), 1) // then add one dim at axis = 1 and broadcast to // shape (ishape.Size(), repeats) mxnet::TShape rshape(2, 1); rshape[0] = ishape.Size(); rshape[1] = 1; mxnet::TShape bshape(2, 1); bshape[0] = rshape[0]; bshape[1] = repeats; return std::make_pair(rshape, bshape); } } template<typename xpu> void RepeatOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TBlob& iTBlob = inputs[0]; const mxnet::TShape& ishape = iTBlob.shape_; if (!shape_is_known(ishape)) return; int repeats = 0; dmlc::optional<int> axisOpt; const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); GetRepeatParams(param, ishape, &repeats, &axisOpt); if (0 == repeats) return; std::pair<mxnet::TShape, mxnet::TShape> rshapes = \ ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
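 * Concretely, the incoming gradient is viewed with the broadcast shape
 * (rshapes.second) and sum-reduced to the reshaped input-gradient shape
 * (rshapes.first) via ReduceAxesComputeImpl below.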
* \param inputs the gradient of the loss function * with respect to the outputs of the operator * \param outputs the gradient of the loss function * with respect to the inputs of the operator */ template<typename xpu> void RepeatOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); const mxnet::TShape& oshape = outputs[0].shape_; if (!shape_is_known(oshape)) return; int repeats = 0; dmlc::optional<int> axisOpt; const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); GetRepeatParams(param, oshape, &repeats, &axisOpt); if (0 == repeats) return; std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats); // reshaped output grad tblob TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; // reshaped input grad tblob TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>( ctx, newInputs, req, newOutputs, rshapes.first); } struct TileParam : public dmlc::Parameter<TileParam> { mxnet::Tuple<int> reps; DMLC_DECLARE_PARAMETER(TileParam) { DMLC_DECLARE_FIELD(reps) .describe("The number of times for repeating the tensor a. Each dim size of reps" " must be a positive integer." " If reps has length d, the result will have dimension of max(d, a.ndim);" " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes." " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream reps_s; reps_s << reps; (*dict)["reps"] = reps_s.str(); } }; inline bool TileOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const TileParam& param = nnvm::get<TileParam>(attrs.parsed); const mxnet::TShape& ishape = (*in_attrs)[0]; if (!shape_is_known(ishape)) { return false; } const mxnet::Tuple<int>& reps = param.reps; // If reps is empty, return a identical input array if (reps.ndim() == 0) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape); return true; } mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = oshape.ndim() - 1; i >= 0; --i) { if (i1 >= 0 && i2 >= 0) { oshape[i] = ishape[i1--] * reps[i2--]; } else if (i1 >= 0) { oshape[i] = ishape[i1--]; } else if (i2 >= 0) { oshape[i] = reps[i2--]; } } // If reps contains 0s, oshape is a zero-size shape. // Need to distinguish between np_shape mode and legacy mode. if (!Imperative::Get()->is_np_shape()) { common::ConvertToNumpyShape(&oshape); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(oshape); } inline bool TileOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if ((*in_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]); } else if ((*out_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]); } return true; } /*! * \brief Reshape the input and output tensors for * using broadcast_to to achieve the functionality * of operator tile. 
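 * Worked example (illustrative): ishape = (2, 3) with reps = (2, 2) yields
 * rshape = (1, 2, 1, 3) and bshape = (2, 2, 2, 3); broadcasting and collapsing
 * adjacent dim pairs produces the tiled output of shape (4, 6).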
* \return a pair of mxnet::TShape's, first is the reshaped * input shape, second is the reshaped output shape. */ inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp( const mxnet::TShape& ishape, const mxnet::Tuple<int>& reps) { if (reps.ndim() == 0) { return std::make_pair(ishape, ishape); } // The shape we want to broadcast to mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1); // The shape of the input tensor after adding new axes before each dim mxnet::TShape rshape(bshape.ndim(), 1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = bshape.ndim() - 1; i >= 0; --i) { if (0 == (i & 1)) { bshape[i] = (i2 >= 0? reps[i2--] : 1); rshape[i] = 1; } else { rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1); } } return std::make_pair(rshape, bshape); } /*! * \brief Implementation of tiling the input tensor a based * on the user-input shape, reps. * If a.ndim < reps.ndim, new axes are pre-pended to a. For example, * the input tensor has shape (3,), and the reps is (2, 4); the input * tensor would be reshaped to (1, 3). * If a.ndim > reps.ndim, pre-pending 1's to reps. For example, * the input tensor has shape (2, 3, 4, 5), and reps is (2, 2); * the reps would be changed to (1, 1, 2, 2). * Suppose we have a.ndim = reps.ndim now. To achieve tiling, * we utilize the operator broadcast_to. For example, for a tensor * of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape * the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding * one axis before each dimension. Then, we want to broadcast * the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final * output tensor would have shape (2*2, 8*3, 9*4, 3*5). */ template<typename xpu> void TileOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& ishape = inputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
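 * Worked example (illustrative): for an input of shape (2, 3) tiled with
 * reps = (2, 2), the incoming gradient of shape (4, 6) is viewed as
 * (2, 2, 2, 3) and sum-reduced to (1, 2, 1, 3), i.e. the (2, 3) input gradient.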
* \param inputs the gradient of the loss function * with respect to the outputs of the operator * \param outputs the gradient of the loss function * with respect to the inputs of the operator */ template<typename xpu> void TileOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& oshape = outputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps); // reshaped output grad tblob TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; // reshaped input grad tblob TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>( ctx, newInputs, req, newOutputs, rshapes.first); } struct ReverseParam : public dmlc::Parameter<ReverseParam> { mxnet::Tuple<int> axis; DMLC_DECLARE_PARAMETER(ReverseParam) { DMLC_DECLARE_FIELD(axis) .describe("The axis which to reverse elements."); } }; #define REVERSE_MAX_DIM 10U struct reverse { MSHADOW_XINLINE static index_t ReverseIndex(index_t idx, index_t nreversedim, const index_t * stride_, const index_t * trailing_) { index_t outputIndex = idx; for (index_t i = 0; i < nreversedim; ++i) { const index_t low = outputIndex % trailing_[i]; index_t high = outputIndex / trailing_[i]; const index_t x = high%stride_[i]; high /= stride_[i]; outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low; } return outputIndex; } #ifdef __CUDACC__ template<typename DType> __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst, const index_t * stride_, const index_t * trailing_) { __shared__ index_t stride_share[REVERSE_MAX_DIM]; __shared__ index_t trailing_share[REVERSE_MAX_DIM]; if (threadIdx.x < REVERSE_MAX_DIM) { stride_share[threadIdx.x] = stride_[threadIdx.x]; trailing_share[threadIdx.x] = trailing_[threadIdx.x]; } __syncthreads(); index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share); dst[new_idx] = src[index]; } #else template<typename DType> MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst, const index_t * stride_, const index_t * trailing_) { index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_); dst[new_idx] = src[index]; } #endif }; template<typename xpu> void ReverseOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM); Stream<xpu> *s = ctx.get_stream<xpu>(); const mxnet::TShape& ishape = inputs[0].shape_; std::vector<index_t> stride_(param.axis.ndim()); std::vector<index_t> trailing_(param.axis.ndim()); index_t reverse_index = 0; for (int axis : param.axis) { CHECK_LT(axis, 
ishape.ndim()); stride_[reverse_index] = ishape[axis]; trailing_[reverse_index] = 1; for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) { trailing_[reverse_index] *= ishape[i2]; } reverse_index++; } #ifdef __CUDACC__ mshadow::Tensor<xpu, 1, uint8_t> workspace = ctx.requested[0].get_space_typed<xpu, 1, uint8_t>( mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s); auto stride_workspace = workspace.dptr_; auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t); cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()), stride_.size() * sizeof(index_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()), trailing_.size() * sizeof(index_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); #endif #ifdef __CUDACC__ MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index, inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace)); }); #else MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index, inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), stride_.data(), trailing_.data()); }); #endif } struct StackParam : public dmlc::Parameter<StackParam> { int axis; int num_args; DMLC_DECLARE_PARAMETER(StackParam) { DMLC_DECLARE_FIELD(axis) .set_default(0) .describe("The axis in the result array along which the input arrays are stacked."); DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) .describe("Number of inputs to be stacked."); } }; inline bool StackOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const StackParam& param = dmlc::get<StackParam>(attrs.parsed); mxnet::TShape dshape; for (const mxnet::TShape& i : (*in_attrs)) { shape_assign(&dshape, i); } if (!shape_is_known(dshape)) return false; mxnet::TShape oshape(dshape.ndim() + 1, -1); int axis = CheckAxis(param.axis, oshape.ndim()); for (int i = 0; i < axis; ++i) { oshape[i] = dshape[i]; } oshape[axis] = param.num_args; for (index_t i = axis + 1; i < oshape.ndim(); ++i) { oshape[i] = dshape[i-1]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(oshape); } template<typename xpu> void StackOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const StackParam& param = dmlc::get<StackParam>(attrs.parsed); int axis = CheckAxis(param.axis, outputs[0].ndim()); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, { std::vector<Tensor<xpu, 3, DType> > data(inputs.size()); Tensor<xpu, 3, DType> out; size_t leading = 1, trailing = 1; for (int i = 0; i < axis; ++i) { leading *= outputs[0].shape_[i]; } for (int i = axis + 1; i < outputs[0].ndim(); ++i) { trailing *= outputs[0].shape_[i]; } size_t mid = outputs[0].shape_[axis]; Shape<3> oshape = Shape3(leading, mid, trailing); out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s); for (size_t i = 0; i < inputs.size(); ++i) { Shape<3> dshape = Shape3(leading, 1, trailing); data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s); } Concatenate(data, &out, 1, req[0]); }) } template<typename xpu> void StackOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const 
std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const StackParam& param = dmlc::get<StackParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size()); Tensor<xpu, 3, DType> grad; size_t leading = 1, trailing = 1; for (int i = 0; i < axis; ++i) { leading *= inputs[0].shape_[i]; } for (int i = axis + 1; i < inputs[0].ndim(); ++i) { trailing *= inputs[0].shape_[i]; } size_t mid = inputs[0].shape_[axis]; Shape<3> oshape = Shape3(leading, mid, trailing); grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s); for (size_t i = 0; i < outputs.size(); ++i) { Shape<3> dshape = Shape3(leading, 1, trailing); grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s); } Split(grad, &grad_in, 1, req); }) } struct SqueezeParam : public dmlc::Parameter<SqueezeParam> { dmlc::optional<mxnet::Tuple<int>> axis; DMLC_DECLARE_PARAMETER(SqueezeParam) { DMLC_DECLARE_FIELD(axis) .set_default(dmlc::optional<mxnet::Tuple<int>>()) .describe("Selects a subset of the single-dimensional entries in the shape." " If an axis is selected with shape entry greater than one, an error is raised."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s; axis_s << axis; (*dict)["axis"] = axis_s.str(); } }; // Given a shape that may have dim size equal to 0, // move all the zeros to the last of the shape array // and keep the relative order of the non-zero values. // Returns the new shape size after moving all zeros to the end. inline size_t SqueezeShapeHelper(mxnet::TShape* shape) { CHECK(shape != nullptr); size_t count = 0; for (int i = 0; i < shape->ndim(); ++i) { if ((*shape)[i] == -1) { ++count; } else { std::swap((*shape)[i], (*shape)[i-count]); } } return shape->ndim() - count; } inline bool SqueezeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = in_attrs->at(0); const int dndim = dshape.ndim(); if (!shape_is_known(dshape)) return false; mxnet::TShape oshape = dshape; if (param.axis.has_value()) { // preprocess axis mxnet::Tuple<int> axes = param.axis.value(); for (int i = 0; i < axes.ndim(); ++i) { if (axes[i] < 0) { axes[i] += dndim; CHECK_GE(axes[i], 0) << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim; } CHECK_LT(axes[i], dndim) << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim; CHECK_EQ(dshape[axes[i]], 1) << "cannot select an axis to squeeze out which has size=" << dshape[axes[i]] << " not equal to one"; CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis"; oshape[axes[i]] = -1; } } else { for (int i = 0; i < oshape.ndim(); ++i) { if (oshape[i] == 1) oshape[i] = -1; } } size_t oshape_size = SqueezeShapeHelper(&oshape); if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1) oshape[0] = 1; oshape_size = 1; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size)); return true; } struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> { int block_size; DMLC_DECLARE_PARAMETER(DepthToSpaceParam) { DMLC_DECLARE_FIELD(block_size) 
.describe("Blocks of [block_size, block_size] are moved");
  }
};

inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly a 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  int block = param.block_size;
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] * block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}

inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}

/*!
 * \brief This function updates the value of input index from where the data element
 * needs to be fetched and written out to the ith location in output tensor
 * \param index_position index within offset array to get offset of given dimension
 * \param dim_size size of current dimension
 * \param idx output tensor index
 * \param inp_index index within input tensor from where value is retrieved
 * \param offset_arr array containing the linear offset of input tensor
 */
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size,
                                  index_t *idx, index_t *inp_index,
                                  const index_t* offset_arr) {
  index_t next_idx_val = *idx / dim_size;
  *inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position];
  *idx = next_idx_val;
}

/*!
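 * Worked example (illustrative): for an input of shape (1, 4, 2, 2) with
 * block = 2, the output has shape (1, 1, 4, 4) and out(0, 0, y, x) is read
 * from in(0, (y % 2) * 2 + (x % 2), y / 2, x / 2).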
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) -> * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped * to the ith index of output tensor * \param i tensor index * \param out_data output tensor * \param in_data input tensor * \param block size of chunks to be moved out of depth dimension * \param size array containing the size of each dimension of input tensor * \param offset_arr array containing the linear offset of input tensor */ template<int req> struct depth_to_space_forward { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block, const index_t* size, const index_t* offset_arr) { index_t inp_index = 0, idx = i, dim_size; dim_size = block; update_index(2, dim_size, &idx, &inp_index, offset_arr); dim_size = size[3]; update_index(5, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(1, dim_size, &idx, &inp_index, offset_arr); dim_size = size[2]; update_index(4, dim_size, &idx, &inp_index, offset_arr); dim_size = size[1] / (block * block); update_index(3, dim_size, &idx, &inp_index, offset_arr); dim_size = size[0]; update_index(0, dim_size, &idx, &inp_index, offset_arr); KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]); } }; /*! * \brief This function calculates the linear offset for each dimension of * input tensor and stores them in an array, which is later used in * performing depth_to_space operation * \param i global thread id * \param offset_arr array to be populated with offset values * \param size array to be populated with size of each dimension of input tensor * \param block size of chunks to be moved out of depth dimension * \param size0 size of Dim 0 of input tensor * \param size1 size of Dim 1 of input tensor * \param size2 size of Dim 2 of input tensor * \param size3 size of Dim 3 of input tensor */ template<int req> struct compute_offset_for_depth_to_space { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block, const index_t size0, const index_t size1, const index_t size2, const index_t size3) { size[0] = size0; size[1] = size1; size[2] = size2; size[3] = size3; offset_arr[5] = 1; offset_arr[4] = offset_arr[5] * size[3]; offset_arr[3] = offset_arr[4] * size[2]; offset_arr[2] = offset_arr[3] * size[1] / (block * block); offset_arr[1] = offset_arr[2] * block; offset_arr[0] = offset_arr[1] * block; } }; template<typename xpu> void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); using namespace mxnet_op; int block = param.block_size; mshadow::Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s); char* workspace_curr_ptr = workspace.dptr_; index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr); index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6); MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch( s, 1, offset_arr, size, block, in_data.shape_[0], 
in_data.shape_[1], in_data.shape_[2], in_data.shape_[3]);
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}

inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly a 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  int block = param.block_size;
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] / block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}

inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}

/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct space_to_depth_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
                                  const int block, const index_t* size,
                                  const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};

/*!
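 * Worked example (illustrative): for an input of shape (1, 1, 4, 4) with
 * block = 2, this kernel fills size = {1, 1, 4, 4} and
 * offset_arr = {16, 16, 8, 4, 2, 1}.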
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing the space_to_depth operation
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size,
                                  const int block, const index_t size0,
                                  const index_t size1, const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};

template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  mshadow::Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);

  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}

namespace split_enum {
enum SplitOpInputs {kData};
}  // namespace split_enum

struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;
  int axis;
  bool squeeze_axis;
  int sections;
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries at which the split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if split equally. "
              "
Default to 0 which means split by indices."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s; indices_s << indices; axis_s << axis; squeeze_axis_s << squeeze_axis; sections_s << sections; (*dict)["indices"] = indices_s.str(); (*dict)["axis"] = axis_s.str(); (*dict)["squeeze_axis"] = squeeze_axis_s.str(); (*dict)["sections"] = sections_s.str(); } }; // struct SplitParam inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) { mxnet::TShape indices(sections+1, -1); indices[0] = 0; int64_t section_size_b = (int64_t) (ishape[axis] / sections); int64_t section_size_a = section_size_b + 1; int section_a = ishape[axis] % sections; for (int i = 0; i < sections; ++i) { if ( i < section_a ) { indices[i+1] = section_size_a * (i + 1); } else { indices[i+1] = section_size_b + indices[i]; } } return indices; } inline bool SplitOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); int dtype = (*in_attrs)[0]; CHECK_NE(dtype, -1) << "First input must have specified type"; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); out_attrs->clear(); int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim(); for (int i = 0; i < num_outputs; ++i) { out_attrs->push_back(dtype); } return true; } inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs, const int real_axis) { using namespace mshadow; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); mxnet::TShape dshape = in_attrs->at(split_enum::kData); mxnet::TShape ishape = in_attrs->at(split_enum::kData); const mxnet::TShape indices = (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices; int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim(); // Pre-compute squeezed output shape for future usage mxnet::TShape squeezed_dshape = dshape; for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) { squeezed_dshape[d] = squeezed_dshape[d+1]; } squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]); // Assign shape to every output for (int i = 0; i < num_outputs; ++i) { int start = indices[i]; int end = (i < num_outputs - 1) ? 
indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      end = start;
    } else {
      CHECK(start <= end)
        << "start " << start << " is greater than end " << end << " for subarray " << i;
      CHECK(end <= ishape[real_axis])
        << "end " << end << " exceeds the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      CHECK_EQ(end - start, 1U)
        << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}

inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}

struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator
   * \param i global thread id
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to indices buffer
   * \param num_sections # of sections after split
   * \param axis_size size of the axis being split
   * \param trailing_size step size within the data buffer of the axis being split
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data,
                                  const size_t* indices,
                                  const size_t num_sections,
                                  const size_t axis_size,
                                  const size_t trailing_size) {
    size_t idx = i / trailing_size % axis_size;
    size_t target = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         target = section++) {}
    DType* target_data = out_data[target];
    const size_t mid_idx = idx - indices[target];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[target + 1] - indices[target];
    const size_t target_idx = head_idx * trailing_size * section_size +
                              mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};

struct ConcatenateKernel {
  /*!
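   * This is the gather counterpart of SplitKernel above: each element i of the
   * concatenated gradient locates its owning section by scanning indices and
   * copies from that section's gradient buffer.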
   * \brief Map function for backward split_v2 operator
   * \param i global thread id
   * \param out_grad ptr to ptr of out grads buffer
   * \param in_grad ptr to input grad buffer
   * \param indices ptr to indices buffer
   * \param num_sections # of sections after split
   * \param axis_size size of the axis to be split along
   * \param trailing_size step size within the data buffer of the axis to be split along
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    size_t idx = i / trailing_size % axis_size;
    size_t src = 0;
    // Linear scan: find the last section whose start offset is <= idx.
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         src = section++) {}
    DType* src_grad = out_grad[src];
    const size_t mid_idx = idx - indices[src];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[src + 1] - indices[src];
    const size_t src_idx =
      head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};

template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_data.ndim());
  size_t mid = input_data.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_data.shape_[i];
  }
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts = (param.sections > 0) ?
    GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
      reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
      reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
      Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(
      s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}

template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
  const TBlob& input_data = inputs[split_enum::kData];
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += input_data.ndim();
  }
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}

template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_grad.ndim());
  size_t mid = input_grad.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_grad.shape_[i];
  }
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts = (param.sections > 0) ?
    GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
      reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
      reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
      Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<ConcatenateKernel, xpu>::Launch(
      s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}

template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
    << "out grad vector size must match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}

inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  return (param.sections > 0) ? param.sections : param.indices.ndim();
}

} // namespace op
} // namespace mxnet

namespace std {
template<>
struct hash<mxnet::op::TransposeParam> {
  size_t operator()(const mxnet::op::TransposeParam& val) {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};

template<>
struct hash<mxnet::op::ReshapeParam> {
  size_t operator()(const mxnet::op::ReshapeParam& val) {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};

template<>
struct hash<mxnet::op::ExpandDimParam> {
  size_t operator()(const mxnet::op::ExpandDimParam& val) {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
} // namespace std

#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
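
// A minimal standalone sketch (not part of MXNet) of the boundary arithmetic
// in GetSplitIndices above, specialized to a hypothetical axis of length 10
// cut into 3 sections: the leading ishape[axis] % sections sections receive
// one extra element, so the boundaries come out as [0, 4, 7, 10].
#include <cstdio>
#include <vector>

int main() {
  const int axis_len = 10, sections = 3;
  std::vector<long> indices(sections + 1, 0);
  const long small_sz = axis_len / sections;  // section_size_b above
  const long big_sz = small_sz + 1;           // section_size_a above
  const int n_big = axis_len % sections;      // section_a above
  for (int i = 0; i < sections; ++i) {
    indices[i + 1] = (i < n_big) ? big_sz * (i + 1) : small_sz + indices[i];
  }
  for (long b : indices) std::printf("%ld ", b);  // prints: 0 4 7 10
  return 0;
}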
struct.c
// RUN: %libomptarget-compile-generic -fopenmp-extensions // RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace // Wrong results on amdgpu // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-oldDriver #include <omp.h> #include <stdio.h> #define CHECK_PRESENCE(Var1, Var2, Var3) \ printf(" presence of %s, %s, %s: %d, %d, %d\n", \ #Var1, #Var2, #Var3, \ omp_target_is_present(&(Var1), omp_get_default_device()), \ omp_target_is_present(&(Var2), omp_get_default_device()), \ omp_target_is_present(&(Var3), omp_get_default_device())) #define CHECK_VALUES(Var1, Var2) \ printf(" values of %s, %s: %d, %d\n", \ #Var1, #Var2, (Var1), (Var2)) int main() { struct S { int i; int j; } s; // CHECK: presence of s, s.i, s.j: 0, 0, 0 CHECK_PRESENCE(s, s.i, s.j); // ======================================================================= // Check that ompx_hold keeps entire struct present. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on first member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.i applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on last member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(tofrom: s.i) \ map(ompx_hold,tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.j applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on struct\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ======================================================================= // Check that transfer to/from host checks reference count correctly. 
// ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent DynRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(to: s.i, s.j) { // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent HoldRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) #pragma omp target data map(tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(ompx_hold, to: s.i, s.j) { // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} // // At the beginning of a region, if the parent's TotalRefCount=1, then the // transfer should happen. // // At the end of a region, it also must be true that the reference count being // decremented is the reference count that is 1. printf("check: parent TotalRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, tofrom: s.i, s.j) { s.i = 21; s.j = 31; } #pragma omp target exit data map(from: s.i, s.j) // No transfer here even though parent's TotalRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); return 0; }
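
// The hold semantics exercised above distilled to a few lines; a minimal
// sketch, assuming the same -fopenmp-extensions build as the RUN line: an
// active ompx_hold reference keeps the mapping alive across an explicit
// delete, and the storage goes away only when the hold itself is released.
#include <omp.h>
#include <stdio.h>

int main(void) {
  int x = 42;
#pragma omp target data map(ompx_hold, tofrom: x)
  {
#pragma omp target exit data map(delete: x)
    // Still mapped: the hold reference blocks the deletion.
    printf("present: %d\n",
           omp_target_is_present(&x, omp_get_default_device()));
  }
  // The hold was released when the data region ended, so x is unmapped now.
  printf("present: %d\n",
         omp_target_is_present(&x, omp_get_default_device()));
  return 0;
}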
slam_residuals.h
#ifndef SLAM_RESIDUALS_H #define SLAM_RESIDUALS_H #include <vector> #include "../util/slam_types.h" #include "./data_structures.h" #include "Eigen/Geometry" #include "ceres/ceres.h" /* This file contains all the residuals (error functions) used in the * optimization process to solve for the maximum likelihood map. */ namespace nautilus { struct OdometryResidual { template <typename T> bool operator()(const T *pose_i, const T *pose_j, T *residual) const { // Predicted pose_j = pose_i * odometry. // Hence, error = pose_j.inverse() * pose_i * odometry; typedef Eigen::Matrix<T, 2, 1> Vector2T; // Extract the translation. const Vector2T Ti(pose_i[0], pose_i[1]); const Vector2T Tj(pose_j[0], pose_j[1]); // The Error in the translation is the difference with the odometry // in the direction of the previous pose, then getting rid of the new // rotation (transpose = inverse for rotation matrices). const Vector2T error_translation = Ti + T_odom.cast<T>() - Tj; // Rotation error is very similar to the translation error, except // we don't care about the difference in the position. const T rotation_diff = pose_i[2] + T(R_odom) - pose_j[2]; const T error_rotation = atan2(sin(rotation_diff), cos(rotation_diff)); // The residuals are weighted according to the parameters set // by the user. residual[0] = T(translation_weight) * error_translation.x(); residual[1] = T(translation_weight) * error_translation.y(); residual[2] = T(rotation_weight) * error_rotation; return true; } OdometryResidual(const slam_types::OdometryFactor2D &factor, double translation_weight, double rotation_weight) : translation_weight(translation_weight), rotation_weight(rotation_weight), R_odom(factor.rotation), T_odom(factor.translation) {} static ceres::AutoDiffCostFunction<OdometryResidual, 3, 3, 3> *create( const slam_types::OdometryFactor2D &factor, double translation_weight, double rotation_weight) { OdometryResidual *residual = new OdometryResidual(factor, translation_weight, rotation_weight); return new ceres::AutoDiffCostFunction<OdometryResidual, 3, 3, 3>(residual); } double translation_weight; double rotation_weight; const float R_odom; const Eigen::Vector2f T_odom; }; // Lidar Normal Residual struct LIDARNormalResidual { template <typename T> bool operator()(const T *source_pose, const T *target_pose, T *residuals) const { typedef Eigen::Transform<T, 2, Eigen::Affine> Affine2T; typedef Eigen::Matrix<T, 2, 1> Vector2T; const Affine2T source_to_world = PoseArrayToAffine(&source_pose[2], &source_pose[0]); const Affine2T world_to_target = PoseArrayToAffine(&target_pose[2], &target_pose[0]).inverse(); const Affine2T source_to_target = world_to_target * source_to_world; #pragma omp parallel for shared(residuals) for (size_t index = 0; index < source_points.size(); index++) { Vector2T source_pointT = source_points[index].cast<T>(); Vector2T target_pointT = target_points[index].cast<T>(); // Transform source_point into the frame of target_point source_pointT = source_to_target * source_pointT; T target_normal_result = target_normals[index].cast<T>().dot(source_pointT - target_pointT); T source_normal_result = source_normals[index].cast<T>().dot(target_pointT - source_pointT); residuals[index * 2] = target_normal_result; residuals[index * 2 + 1] = source_normal_result; } return true; } LIDARNormalResidual(const std::vector<Eigen::Vector2f> &source_points, const std::vector<Eigen::Vector2f> &target_points, const std::vector<Eigen::Vector2f> &source_normals, const std::vector<Eigen::Vector2f> &target_normals) : 
source_points(source_points), target_points(target_points), source_normals(source_normals), target_normals(target_normals) { CHECK_EQ(source_points.size(), target_points.size()); CHECK_EQ(target_points.size(), target_normals.size()); CHECK_EQ(source_normals.size(), target_normals.size()); } static ceres::AutoDiffCostFunction<LIDARNormalResidual, ceres::DYNAMIC, 3, 3> *create(const std::vector<Eigen::Vector2f> &source_points, const std::vector<Eigen::Vector2f> &target_points, const std::vector<Eigen::Vector2f> &source_normals, const std::vector<Eigen::Vector2f> &target_normals) { CHECK_GT(source_points.size(), 0); LIDARNormalResidual *residual = new LIDARNormalResidual( source_points, target_points, source_normals, target_normals); return new ceres::AutoDiffCostFunction<LIDARNormalResidual, ceres::DYNAMIC, 3, 3>(residual, source_points.size() * 2); } const std::vector<Eigen::Vector2f> source_points; const std::vector<Eigen::Vector2f> target_points; const std::vector<Eigen::Vector2f> source_normals; const std::vector<Eigen::Vector2f> target_normals; }; struct LIDARPointResidual { template <typename T> bool operator()(const T *source_pose, const T *target_pose, T *residuals) const { typedef Eigen::Transform<T, 2, Eigen::Affine> Affine2T; typedef Eigen::Matrix<T, 2, 1> Vector2T; const Affine2T source_to_world = PoseArrayToAffine(&source_pose[2], &source_pose[0]); const Affine2T world_to_target = PoseArrayToAffine(&target_pose[2], &target_pose[0]).inverse(); const Affine2T source_to_target = world_to_target * source_to_world; #pragma omp parallel for shared(residuals) for (size_t index = 0; index < source_points.size(); index++) { Vector2T source_pointT = source_points[index].cast<T>(); Vector2T target_pointT = target_points[index].cast<T>(); // Transform source_point into the frame of target_point source_pointT = source_to_target * source_pointT; Vector2T difference_in_target = target_pointT - source_pointT; residuals[index * 2] = difference_in_target(0); residuals[index * 2 + 1] = difference_in_target(1); } return true; } LIDARPointResidual(const std::vector<Eigen::Vector2f> &source_points, const std::vector<Eigen::Vector2f> &target_points, const std::vector<Eigen::Vector2f> &source_normals, const std::vector<Eigen::Vector2f> &target_normals) : source_points(source_points), target_points(target_points), source_normals(source_normals), target_normals(target_normals) { CHECK_EQ(source_points.size(), target_points.size()); CHECK_EQ(target_points.size(), target_normals.size()); CHECK_EQ(source_normals.size(), target_normals.size()); } static ceres::AutoDiffCostFunction<LIDARPointResidual, ceres::DYNAMIC, 3, 3> *create(const std::vector<Eigen::Vector2f> &source_points, const std::vector<Eigen::Vector2f> &target_points, const std::vector<Eigen::Vector2f> &source_normals, const std::vector<Eigen::Vector2f> &target_normals) { CHECK_GT(source_points.size(), 0); LIDARPointResidual *residual = new LIDARPointResidual( source_points, target_points, source_normals, target_normals); return new ceres::AutoDiffCostFunction<LIDARPointResidual, ceres::DYNAMIC, 3, 3>(residual, source_points.size() * 2); } const std::vector<Eigen::Vector2f> source_points; const std::vector<Eigen::Vector2f> target_points; const std::vector<Eigen::Vector2f> source_normals; const std::vector<Eigen::Vector2f> target_normals; }; struct PointToLineResidual { template <typename T> bool operator()(const T *pose, const T *line_pose, T *residuals) const { typedef Eigen::Matrix<T, 2, 1> Vector2T; typedef Eigen::Transform<T, 2, 
Eigen::Affine> Affine2T; const Affine2T pose_to_world = PoseArrayToAffine(&pose[2], &pose[0]); const Affine2T line_to_world = PoseArrayToAffine(&line_pose[2], &line_pose[0]); Vector2T line_start = line_to_world * line_segment_.start.cast<T>(); Vector2T line_end = line_to_world * line_segment_.end.cast<T>(); const LineSegment<T> TransformedLineSegment(line_start, line_end); #pragma omp parallel for shared(residuals) for (size_t index = 0; index < points_.size(); index++) { Vector2T pointT = points_[index].cast<T>(); // Transform source_point into the frame of the line pointT = pose_to_world * pointT; T dist_along_normal = DistanceToLineSegment(pointT, TransformedLineSegment); residuals[index] = dist_along_normal; } return true; } PointToLineResidual(const LineSegment<float> &line_segment, const vector<Vector2f> points) : line_segment_(line_segment), points_(points) {} static ceres::AutoDiffCostFunction<PointToLineResidual, ceres::DYNAMIC, 3, 3> *create(const LineSegment<float> &line_segment, const vector<Vector2f> points) { PointToLineResidual *res = new PointToLineResidual(line_segment, points); return new ceres::AutoDiffCostFunction<PointToLineResidual, ceres::DYNAMIC, 3, 3>(res, points.size()); } const LineSegment<float> line_segment_; const std::vector<Eigen::Vector2f> points_; }; } // namespace nautilus #endif
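
// Roughly how one of these residuals is attached to a problem; a hedged
// sketch in which `factor`, the weights, and the raw [x, y, theta] pose
// arrays are placeholders supplied by the caller.
#include "../util/slam_types.h"
#include "ceres/ceres.h"

void AddOdometryConstraint(ceres::Problem *problem,
                           const slam_types::OdometryFactor2D &factor,
                           double translation_weight, double rotation_weight,
                           double *pose_i, double *pose_j) {
  // create() wraps the residual in an AutoDiffCostFunction with 3 residual
  // terms and two 3-parameter blocks, matching operator() above.
  problem->AddResidualBlock(
      nautilus::OdometryResidual::create(factor, translation_weight,
                                         rotation_weight),
      /*loss_function=*/nullptr, pose_i, pose_j);
}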
ospf_fmt_plug.c
/* * This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Special thanks goes to the Loki project for providing the sample pcap files, * and for implementing the cryptographic functions involved in RFC 5709 * clearly. * * https://c0decafe.de/svn/codename_loki/ */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ospf; #elif FMT_REGISTERS_H john_register_one(&fmt_ospf); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include "formats.h" #include "sha.h" #include "sha2.h" #include "hmac_sha.h" #include "misc.h" #include "common.h" #include "params.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "ospf" #define FORMAT_NAME "OSPF / IS-IS" #define FORMAT_TAG "$ospf$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "HMAC-SHA-X 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MAX_SALT_LEN 1500 + 64 // 64 is reserved for appending ospf_apad static struct fmt_tests tests[] = { /* ospf*.pcap from https://c0decafe.de/svn/codename_loki/ */ {"$ospf$1$02010030ac10001400000000000000020000011454ee4518ffffff00000a120100000028c0a86f14c0a86f0aac10000a$e59ba2c56a2c0429ebe72a194e4b54c250cac1a3", "1234"}, {"$ospf$2$0201002cac10000a00000000000000020000012054f4c8adffffff00000a120100000028c0a86f0a00000000$508a1abffb5b4554e1aa46eb053bca7105c3e8f6fece4c945f0a0020edb054ec", "1234"}, {"$ospf$3$0201002cac10000a00000000000000020000013054f4c8e4ffffff00000a120100000028c0a86f0a00000000$9dcf336773034f4ad8b0e19c52546ba72fd91d79d9416c9c1c4854002d3c0b5fc7c80fc1c4994ab9b6c48d9c6ac03587", "1234"}, {"$ospf$4$0201002cac10000a00000000000000020000014054f4c912ffffff00000a120100000028c0a86f0a00000000$4faa125881137ab3257ee9c8626d0ffa0c387c2e41a832d435afffc41d35881360fbe74442191a8aef201a4aad2689577a0c26a3cc5c681e72f09c297d16ba6a", "1234"}, /* isis*.pcap from https://c0decafe.de/svn/codename_loki/ */ {"$ospf$1$831401001101000301192168201101001b004e000104034900018102cc8e8404c0a8ca00f00f0000000003192168201104000000030a17030001$0a33e7acf138d0bfb2b197f331bbd8ae237e0465", "1234"}, {"$ospf$2$831401001101000301192168201101001b005a000104034900018102cc8e8404c0a8ca00f00f0000000003192168201104000000030a23030002$3082271800f8fab2976d57bb5d1d6e182189b9a2d542f48371da934f854acab9", "1234"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { uint32_t salt_length; uint32_t type; unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(saved_key); MEM_FREE(crypt_out); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, 
FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) // type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 1 && value != 2 && value != 3 && value != 4) goto err; if ((p = strtokm(NULL, "$")) == NULL) // salt goto err; if (hexlenl(p, &extra) > MAX_SALT_LEN * 2 || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) // binary goto err; value = hexlenl(p, &extra); if (value < 20 * 2 || value > 64 * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } // https://tools.ietf.org/rfc/rfc5709.txt and Loki static const char ospf_apad[] = { 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3 }; static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "$"); // type cs.type = atoi(p); p = strtokm(NULL, "$"); // salt cs.salt_length = strlen(p) / 2; for (i = 0; i < cs.salt_length; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; memcpy(cs.salt + cs.salt_length, ospf_apad, 64); MEM_FREE(keeptr); return &cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } #ifndef SHA_DIGEST_LENGTH #define SHA_DIGEST_LENGTH 20 #endif #ifndef SHA256_DIGEST_LENGTH #define SHA256_DIGEST_LENGTH 32 #endif #ifndef SHA384_DIGEST_LENGTH #define SHA384_DIGEST_LENGTH 48 #endif #ifndef SHA512_DIGEST_LENGTH #define SHA512_DIGEST_LENGTH 64 #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { int plen = strlen(saved_key[index]); unsigned char key[64]; unsigned char out[64]; if (cur_salt->type == 1) { SHA_CTX ctx; // process password according to rfc5709 if (plen < SHA_DIGEST_LENGTH) { memcpy(key, saved_key[index], plen); memset(key + plen, 0, SHA_DIGEST_LENGTH - plen); } else if (plen == SHA_DIGEST_LENGTH) { memcpy(key, saved_key[index], SHA_DIGEST_LENGTH); } else { SHA1_Init(&ctx); SHA1_Update(&ctx, saved_key[index], plen); SHA1_Final(key, &ctx); } // salt already has ospf_apad appended hmac_sha1(key, 20, cur_salt->salt, cur_salt->salt_length + SHA_DIGEST_LENGTH, out, 16); memcpy((unsigned char*)crypt_out[index], out, 16); } else if (cur_salt->type == 2) { SHA256_CTX ctx; if (plen < SHA256_DIGEST_LENGTH) { memcpy(key, saved_key[index], plen); memset(key + plen, 0, SHA256_DIGEST_LENGTH - plen); } else if (plen == SHA256_DIGEST_LENGTH) { memcpy(key, saved_key[index], SHA256_DIGEST_LENGTH); } else { SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index], plen); SHA256_Final(key, &ctx); } hmac_sha256(key, 32, cur_salt->salt, cur_salt->salt_length + SHA256_DIGEST_LENGTH, out, 16); 
memcpy((unsigned char*)crypt_out[index], out, 16); } else if (cur_salt->type == 3) { SHA512_CTX ctx; if (plen < SHA384_DIGEST_LENGTH) { memcpy(key, saved_key[index], plen); memset(key + plen, 0, SHA384_DIGEST_LENGTH - plen); } else if (plen == SHA384_DIGEST_LENGTH) { memcpy(key, saved_key[index], SHA384_DIGEST_LENGTH); } else { SHA384_Init(&ctx); SHA384_Update(&ctx, saved_key[index], plen); SHA384_Final(key, &ctx); } hmac_sha384(key, 48, cur_salt->salt, cur_salt->salt_length + SHA384_DIGEST_LENGTH, out, 16); memcpy((unsigned char*)crypt_out[index], out, 16); } else if (cur_salt->type == 4) { SHA512_CTX ctx; if (plen < SHA512_DIGEST_LENGTH) { memcpy(key, saved_key[index], plen); memset(key + plen, 0, SHA512_DIGEST_LENGTH - plen); } else if (plen == SHA512_DIGEST_LENGTH) { memcpy(key, saved_key[index], SHA512_DIGEST_LENGTH); } else { SHA512_Init(&ctx); SHA512_Update(&ctx, saved_key[index], plen); SHA512_Final(key, &ctx); } hmac_sha512(key, 64, cur_salt->salt, cur_salt->salt_length + SHA512_DIGEST_LENGTH, out, 16); memcpy((unsigned char*)crypt_out[index], out, 16); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (((uint32_t*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void ospf_set_key(char *key, int index) { strnzcpyn(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_ospf = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, ospf_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
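
/*
 * The password-to-key step that crypt_all() inlines for each hash type is
 * the RFC 5709 rule: zero-pad short passwords to the digest length, use
 * exact-length ones as-is, and hash longer ones down. A standalone sketch
 * of the SHA-1 case, written against OpenSSL's one-shot SHA1() rather than
 * this plugin's wrappers.
 */
#include <string.h>
#include <openssl/sha.h>

static void rfc5709_prepare_key_sha1(const char *pw, size_t pwlen,
                                     unsigned char key[SHA_DIGEST_LENGTH])
{
	if (pwlen < SHA_DIGEST_LENGTH) {         /* short: zero-pad */
		memcpy(key, pw, pwlen);
		memset(key + pwlen, 0, SHA_DIGEST_LENGTH - pwlen);
	} else if (pwlen == SHA_DIGEST_LENGTH) { /* exact: use as-is */
		memcpy(key, pw, SHA_DIGEST_LENGTH);
	} else {                                 /* long: hash down */
		SHA1((const unsigned char *)pw, pwlen, key);
	}
}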
GB_unaryop__abs_uint32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint32_fp32 // op(A') function: GB_tran__abs_uint32_fp32 // C type: uint32_t // A type: float // cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint32_fp32 ( uint32_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
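
//------------------------------------------------------------------------------
// Stripped of the macro plumbing, the generated kernel above reduces to a
// cast-then-apply loop. A self-contained analogue follows; the real
// GB_CAST_UNSIGNED in GB.h also pins down NaN and out-of-range inputs,
// which this sketch ignores.
//------------------------------------------------------------------------------

#include <stdint.h>

// C = (uint32_t) A, elementwise; ABS of an unsigned value is the identity.
void unop_abs_uint32_fp32_sketch
(
    uint32_t *Cx,
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (uint32_t) Ax [p] ;
    }
}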
GB_binop__div_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8) // A*D function (colscale): GB (_AxD__div_uint8) // D*A function (rowscale): GB (_DxB__div_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8) // C=scalar+B GB (_bind1st__div_uint8) // C=scalar+B' GB (_bind1st_tran__div_uint8) // C=A+scalar GB (_bind2nd__div_uint8) // C=A'+scalar GB (_bind2nd_tran__div_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \ } GrB_Info GB (_bind1st_tran__div_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \ } GrB_Info GB (_bind2nd_tran__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
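
//------------------------------------------------------------------------------
// The easiest detail to lose above is the operand order of the two
// scalar-bind kernels. A standalone analogue, with plain C division standing
// in for GB_IDIV_UNSIGNED (whose definition in GB.h also fixes the
// divide-by-zero cases; this sketch assumes nonzero divisors):
//------------------------------------------------------------------------------

#include <stdint.h>

// bind1st: the scalar is the numerator, z = div (x, bij)
void bind1st_div_uint8_sketch (uint8_t *Cx, uint8_t x, const uint8_t *Bx,
    int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = x / Bx [p] ;
}

// bind2nd: the scalar is the denominator, z = div (aij, y)
void bind2nd_div_uint8_sketch (uint8_t *Cx, const uint8_t *Ax, uint8_t y,
    int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = Ax [p] / y ;
}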
parallel_team.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt, multicpu // UNSUPPORTED: gcc #include "callback.h" int main() { #pragma omp target teams num_teams(1) thread_limit(2) #pragma omp parallel num_threads(2) { printf("In teams\n"); } return 0; } // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_initial_task_begin: // CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1 // CHECK: {{^}}[[MASTER]]: ompt_event_teams_begin: // CHECK-SAME: parent_task_id=[[INIT_TASK]] // CHECK-SAME: {{.*}} requested_num_teams=1 // CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]] // // team 0/thread 0 // // initial task in the teams construct // CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_begin: // CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=1, index=0 // parallel region forked by runtime // CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin: // CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]] // CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]] // CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin: // CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]] // user parallel region // CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin: // CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]] // CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]] // CHECK-SAME: {{.*}} requested_team_size=2 // CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin: // CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_00:[0-9]+]] // CHECK-SAME: {{.*}} team_size=2, thread_num=0 // // barrier event is here // // CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_end: // CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_00]] // CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end: // CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]] // CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_end: // CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]] // CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end: // CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]] // CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end: // CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0 // CHECK: {{^}}[[MASTER]]: ompt_event_teams_end: // CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]] // CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end: // CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1 // // team 0/thread 1 // // CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_01:[0-9]+]] // CHECK-SAME: {{.*}} team_size=2, thread_num=1 // // barrier event is here // // CHECK: {{^}}[[WORKER]]: ompt_event_implicit_task_end: // CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_01]]
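
// A note on the FileCheck idiom used throughout this test:
// [[MASTER:[0-9]+]] captures the matched thread id on first use, and later
// bare [[MASTER]] occurrences require the identical digits, which is what
// pins each block of events to a single thread (%sort-threads groups the
// interleaved output beforehand). In miniature, spelled with a hypothetical
// MINI prefix so these lines stay inert for FileCheck:
//
// MINI: {{^}}[[TID:[0-9]+]]: ompt_event_parallel_begin:
// MINI: {{^}}[[TID]]: ompt_event_parallel_end: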
pado_unw_unv_para.201912311607.parallel_bp_labeling.h
/* * pado.h * * Created on: Sep 4, 2018 * Author: Zhen Peng */ #ifndef INCLUDES_PADO_UNW_PARA_UNV_H_ #define INCLUDES_PADO_UNW_PARA_UNV_H_ #include <vector> #include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> #include <xmmintrin.h> #include <bitset> #include <cmath> #include <atomic> #include "globals.h" #include "graph.h" #include <omp.h> using std::vector; using std::unordered_map; using std::map; using std::bitset; using std::stable_sort; using std::min; using std::fill; namespace PADO { //inti NUM_THREADS = 4; //const inti BATCH_SIZE = 1024; // The size for regular batch and bit array. //const inti BITPARALLEL_SIZE = 50; //const inti THRESHOLD_PARALLEL = 80; //// Batch based processing, 09/11/2018 template<inti BATCH_SIZE = 1024> class ParaVertexCentricPLL { private: static const inti BITPARALLEL_SIZE = 50; idi num_v_ = 0; const inti THRESHOLD_PARALLEL = 80; // Structure for the type of label struct IndexType { struct Batch { idi batch_id; // Batch ID idi start_index; // Index to the array distances where the batch starts inti size; // Number of distances element in this batch Batch(idi batch_id_, idi start_index_, inti size_) : batch_id(batch_id_), start_index(start_index_), size(size_) { ; } }; struct DistanceIndexType { idi start_index; // Index to the array vertices where the same-ditance vertices start inti size; // Number of the same-distance vertices smalli dist; // The real distance DistanceIndexType(idi start_index_, inti size_, smalli dist_) : start_index(start_index_), size(size_), dist(dist_) { ; } }; smalli bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<Batch> batches; // Batch info vector<DistanceIndexType> distances; // Distance info vector<idi> vertices; // Vertices in the label, preresented as temperory ID }; //__attribute__((aligned(64))); // Structure for the type of temporary label struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, it helps update_label_indices() and can be reset along with other indicator elements. // bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // std::vector<std::atomic_bool> indicator; std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0); // Use a queue to store candidates vector<inti> candidates_que = vector<inti>(BATCH_SIZE); inti end_candidates_que = 0; vector<uint8_t> is_candidate = vector<uint8_t>(BATCH_SIZE, 0); // ShortIndex() // { // indicator.resize(BATCH_SIZE + 1); // indicator_reset(); // } void indicator_reset() { const idi bound = indicator.size(); std::fill(indicator.begin(), indicator.end(), 0); //#pragma omp parallel for // for (idi i = 0; i < bound; ++i) { // indicator[i].store(false, std::memory_order_relaxed); // } } }; //__attribute__((aligned(64))); // Structure of the public ordered index for distance queries. 
struct IndexOrdered { weighti bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<idi> label_id; vector<weighti> label_dists; }; vector<IndexType> L; vector<IndexOrdered> Index; // Ordered labels for original vertex ID void construct(const Graph &G); inline void bit_parallel_labeling( const Graph &G, vector<IndexType> &L, vector<uint8_t> &used_bp_roots); // inline void bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<bool> &used_bp_roots); inline void batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated); // inline void batch_process( // const Graph &G, // idi b_id, // idi root_start, // inti roots_size, // vector<IndexType> &L, // const vector<bool> &used_bp_roots); inline void initialize( vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots); inline void push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, const idi offset_tmp_queue, // idi &offset_tmp_candidate_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter); inline bool distance_query( idi cand_root_id, idi v_id, idi roots_start, const vector<IndexType> &L, const vector<vector<smalli> > &dist_matrix, smalli iter); inline void insert_label_only( idi cand_root_id, idi v_id, idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix, smalli iter); inline void update_label_indices( idi v_id, idi inserted_count, vector<IndexType> &L, vector<ShortIndex> &short_index, idi b_id, smalli iter); inline void reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix); // Some parallel interfaces inline idi prefix_sum_for_offsets( vector<idi> &offsets); template<typename T> inline void collect_into_queue( vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. 
            idi num_elements, // total number of elements which need to be added from tmp_queue to queue
            vector<T> &queue,
            idi &end_queue);
    template<typename T, typename Int>
    inline void TS_enqueue(
            vector<T> &queue,
            Int &end_queue,
            const T &e);

    // Test only
//    uint64_t normal_hit_count = 0;
    uint64_t bp_hit_count = 0;
//    uint64_t total_check_count = 0;
//    double initializing_time = 0;
//    double candidating_time = 0;
//    double adding_time = 0;
//    double distance_query_time = 0;
//    double init_index_time = 0;
//    double init_dist_matrix_time = 0;
//    double init_start_reset_time = 0;
//    double init_indicators_time = 0;
//#ifdef PROFILE
//    vector<double> thds_adding_time = vector<double>(80, 0.0);
//    vector<uint64_t> thds_adding_count = vector<uint64_t>(80, 0);
//    L2CacheMissRate cache_miss;
//#endif
//    vector<ShortIndex> tmp_short_index;
//    vector<ShortIndex> now_short_index;
    // End test

public:
    ParaVertexCentricPLL() = default;
    ParaVertexCentricPLL(const Graph &G);

    weighti query(
            idi u,
            idi v);

    void print();
    void switch_labels_to_old_id(
            const vector<idi> &rank2id,
            const vector<idi> &rank);
    void store_index_to_file(
            const char *filename,
            const vector<idi> &rank);
    void load_index_from_file(
            const char *filename);
    void order_labels(
            const vector<idi> &rank2id,
            const vector<idi> &rank);
    weighti query_distance(
            idi a,
            idi b);
}; // class ParaVertexCentricPLL

template<inti BATCH_SIZE>
const inti ParaVertexCentricPLL<BATCH_SIZE>::BITPARALLEL_SIZE;

template<inti BATCH_SIZE>
ParaVertexCentricPLL<BATCH_SIZE>::ParaVertexCentricPLL(const Graph &G)
{
    construct(G);
}

template<inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::bit_parallel_labeling(
        const Graph &G,
        vector<IndexType> &L,
        vector<uint8_t> &used_bp_roots)
{
    idi num_v = G.get_num_v();
    idi num_e = G.get_num_e();

    if (num_v <= BITPARALLEL_SIZE) {
        // Sequential version
        std::vector<weighti> tmp_d(num_v); // distances from the root to every v
        std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
        std::vector<idi> que(num_v); // active queue
        std::vector<std::pair<idi, idi> > sibling_es(num_e); // siblings, whose distances to the root are equal (have difference of 0)
        std::vector<std::pair<idi, idi> > child_es(num_e); // child and father, whose distances to the root have difference of 1

        idi r = 0; // root r
        for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
            while (r < num_v && used_bp_roots[r]) {
                ++r;
            }
            if (r == num_v) {
                for (idi v = 0; v < num_v; ++v) {
                    L[v].bp_dist[i_bpspt] = SMALLI_MAX;
                }
                continue;
            }
            used_bp_roots[r] = 1;

            fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX);
            fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));

            idi que_t0 = 0, que_t1 = 0, que_h = 0;
            que[que_h++] = r;
            tmp_d[r] = 0;
            que_t1 = que_h;

            int ns = 0; // number of selected neighbors, at most 64
            // The edges of one vertex in G are ordered decreasingly by rank (lower rank first), so the edges need to be traversed backward here.
            // There was a bug that cost countless hours: an unsigned iterator i might decrease to zero and then wrap around to the maximum value.
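            // A hedged illustration of that bug (not part of the algorithm): the old
            // backward loop, kept in the comments below,
            //     idi i_bound = G.vertices[r] - 1;
            //     for (idi i = i_start; i > i_bound; --i) { ... }
            // misbehaves with an unsigned idi. When G.vertices[r] == 0, i_bound wraps
            // to the maximum idi value, and decrementing i past zero wraps the same
            // way. The active loop avoids both wraparounds by counting d_i forward
            // from 0 and deriving i = i_start - d_i.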
// idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } else { // Parallel version: parallel queues (graph traverse), but sequential beginning (roots selecting). std::vector<smalli> tmp_d(num_v); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<idi> que(num_v); // active queue std::vector<std::pair<idi, idi> > sibling_es( num_e); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<idi, idi> > child_es( num_e); // child and father, their distances to the root have difference of 1. idi r = 0; // root r for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { while (r < num_v && used_bp_roots[r]) { ++r; } if (r == num_v) { for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = SMALLI_MAX; } continue; } used_bp_roots[r] = 1; std::fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); idi que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; // number of selected neighbor, default 64 // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. 
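            // The parallel frontier expansion in this branch follows a three-step
            // pattern (a summary of the code below, with a worked example):
            //   1) offsets_tmp_queue[i] = out-degree of the i-th frontier vertex;
            //   2) prefix_sum_for_offsets() turns the degrees into exclusive offsets,
            //      e.g. degrees {2, 3, 1} become offsets {0, 2, 5} with total 6;
            //   3) each thread writes its discoveries into its private slice
            //      [offsets[i], offsets[i] + sizes[i]) of a temporary queue, and
            //      collect_into_queue() compacts the slices into the real queue.
            // The CAS on tmp_d ensures each newly reached vertex is enqueued by
            // exactly one thread.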
// idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; // For parallel adding to que idi que_size = que_t1 - que_t0; vector<idi> offsets_tmp_queue(que_size); #pragma omp parallel for for (idi i_q = 0; i_q < que_size; ++i_q) { offsets_tmp_queue[i_q] = G.out_degrees[que[que_t0 + i_q]]; } idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); vector<idi> tmp_que(num_neighbors); vector<idi> sizes_tmp_que(que_size, 0); // For parallel adding to sibling_es vector<std::pair<idi, idi> > tmp_sibling_es(num_neighbors); vector<idi> sizes_tmp_sibling_es(que_size, 0); // For parallel adding to child_es vector<std::pair<idi, idi> > tmp_child_es(num_neighbors); vector<idi> sizes_tmp_child_es(que_size, 0); #pragma omp parallel for for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi tmp_que_i = que_i - que_t0; // location in the tmp_que idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; smalli td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. idi &size_in_group = sizes_tmp_sibling_es[tmp_que_i]; tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; ++size_in_group; // sibling_es[num_sibling_es].first = v; // sibling_es[num_sibling_es].second = tv; // ++num_sibling_es; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { if (CAS(tmp_d.data() + tv, SMALLI_MAX, td)) { // tmp_d[tv] = td tmp_que[offsets_tmp_queue[tmp_que_i] + sizes_tmp_que[tmp_que_i]++] = tv; } } // if (tmp_d[tv] == SMALLI_MAX) { // que[que_h++] = tv; // tmp_d[tv] = td; // } idi &size_in_group = sizes_tmp_child_es[tmp_que_i]; tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; ++size_in_group; // child_es[num_child_es].first = v; // child_es[num_child_es].second = tv; // ++num_child_es; } } } // From tmp_sibling_es to sibling_es idi total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_sibling_es); collect_into_queue( tmp_sibling_es, offsets_tmp_queue, sizes_tmp_sibling_es, total_sizes_tmp_queue, sibling_es, num_sibling_es); #pragma omp parallel for for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST); // __sync_or_and_fetch(&tmp_s[v].second, tmp_s[w].first); // __sync_or_and_fetch(&tmp_s[w].second, tmp_s[v].first); // tmp_s[v].second |= tmp_s[w].first; // tmp_s[w].second |= tmp_s[v].first; } // From tmp_child_es to child_es total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_child_es); collect_into_queue( tmp_child_es, offsets_tmp_queue, sizes_tmp_child_es, total_sizes_tmp_queue, child_es, num_child_es); #pragma omp 
parallel for for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; __atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST); // __sync_or_and_fetch(&tmp_s[c].first, tmp_s[v].first); // __sync_or_and_fetch(&tmp_s[c].second, tmp_s[v].second); // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; } // From tmp_que to que total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_que); collect_into_queue( tmp_que, offsets_tmp_queue, sizes_tmp_que, total_sizes_tmp_queue, que, que_h); que_t0 = que_t1; que_t1 = que_h; } #pragma omp parallel for for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } // free(tmp_d); } } // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::initialize( vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots) { idi roots_bound = roots_start + roots_size; // init_start_reset_time -= WallTimer::get_time_mark(); // TODO: parallel enqueue { // active_queue for (idi r_real_id = roots_start; r_real_id < roots_bound; ++r_real_id) { if (!used_bp_roots[r_real_id]) { active_queue[end_active_queue++] = r_real_id; } } } // init_start_reset_time += WallTimer::get_time_mark(); // init_index_time -= WallTimer::get_time_mark(); // Short_index { // init_indicators_time -= WallTimer::get_time_mark(); if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); short_index[v].indicator_reset(); once_candidated[v] = 0; } } else { for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); short_index[v].indicator_reset(); once_candidated[v] = 0; } } //#pragma omp parallel for // for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); // once_candidated[v] = 0; // } end_once_candidated_queue = 0; if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels short_index[v].indicator[v - roots_start] = 1; short_index[v].indicator[BATCH_SIZE] = 1; // v got labels } } } else { for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels short_index[v].indicator[v - roots_start] = 1; short_index[v].indicator[BATCH_SIZE] = 1; // v got labels } } } // for (idi v = roots_start; v < roots_bound; ++v) { // if 
(!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels // } // } // init_indicators_time += WallTimer::get_time_mark(); } // // Real Index { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } else { for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); // } } // init_index_time += WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); // Dist_matrix { if (roots_size >= THRESHOLD_PARALLEL) { // schedule dynamic is slower #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lr.distances[dist_i].size; smalli dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; smalli dist; for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < 
v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // smalli dist; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // dist = Lr.distances[dist_i].dist; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; // } // } // } // } } // init_dist_matrix_time += WallTimer::get_time_mark(); } // Function that pushes v_head's labels to v_head's every neighbor template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, const idi offset_tmp_queue, // idi &offset_tmp_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, // vector<idi> &once_candidated_queue, // idi &end_once_candidated_queue, vector<idi> &tmp_once_candidated_queue, idi &size_tmp_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter) { const IndexType &Lv = L[v_head]; // These 2 index are used for traversing v_head's last inserted labels idi l_i_start = Lv.distances.rbegin()->start_index; idi l_i_bound = l_i_start + Lv.distances.rbegin()->size; // Traverse v_head's every neighbor v_tail idi e_i_start = G.vertices[v_head]; idi e_i_bound = e_i_start + G.out_degrees[v_head]; for (idi e_i = e_i_start; e_i < e_i_bound; ++e_i) { idi v_tail = G.out_edges[e_i]; if (used_bp_roots[v_head]) { continue; } if (v_tail < roots_start) { // v_tail has higher rank than any roots, then no roots can push new labels to it. return; } // if (v_tail <= Lv.vertices[l_i_start] + roots_start) { // v_tail has higher rank than any v_head's labels // return; // } // This condition cannot be used anymore since v_head's last inserted labels are not ordered from higher rank to lower rank now, because v_head's candidate set is a queue now rather than a bitmap. For a queue, its order of candidates are not ordered by ranks. const IndexType &L_tail = L[v_tail]; _mm_prefetch(&L_tail.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_tail.bp_sets[0][0], _MM_HINT_T0); // Traverse v_head's last inserted labels for (idi l_i = l_i_start; l_i < l_i_bound; ++l_i) { inti label_root_id = Lv.vertices[l_i]; idi label_real_id = label_root_id + roots_start; if (v_tail <= label_real_id) { // v_tail has higher rank than all remaining labels // For candidates_que, this is not true any more! 
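                // Note: `continue` rather than `break` below, because candidates_que
                // holds roots in insertion order, not rank order: a later label in
                // this range may still have lower rank than v_tail and must still be
                // examined.
                // The insertions that follow all use the same double-checked CAS
                // idiom (hedged sketch):
                //     if (!flag[x]) {                            // cheap racy pre-check
                //         if (CAS(&flag[x], (uint8_t) 0, (uint8_t) 1)) {
                //             /* exactly one winning thread records x */
                //         }
                //     }
                // so every vertex or candidate is recorded at most once.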
// break; continue; } ShortIndex &SI_v_tail = short_index[v_tail]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail // SI_v_tail.indicator.set(label_root_id); {// Deal with race condition if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { // The label is already selected before continue; } } // Add into once_candidated_queue if (!once_candidated[v_tail]) { // If v_tail is not in the once_candidated_queue yet, add it in if (CAS(&once_candidated[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail; } } // CHANGED! // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // ++total_check_count; const IndexType &L_label = L[label_real_id]; bool no_need_add = false; _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { inti td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? -1 : 0; if (td <= iter) { no_need_add = true; // ++bp_hit_count; __atomic_add_fetch(&bp_hit_count, 1, __ATOMIC_SEQ_CST); break; } } } if (no_need_add) { continue; } // Record vertex label_root_id as v_tail's candidates label // SI_v_tail.candidates.set(label_root_id); // if (!SI_v_tail.is_candidate[label_root_id]) { // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // } if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(&SI_v_tail.is_candidate[label_root_id], (uint8_t) 0, (uint8_t) 1)) { TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); } } // Add into candidate_queue if (!got_candidates[v_tail]) { // If v_tail is not in candidate_queue, add it in (prevent duplicate) if (CAS(&got_candidates[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_candidate_queue[offset_tmp_queue + size_tmp_candidate_queue++] = v_tail; } } } } // printf("v_head: %u, size_tmp_candidate_queue: %u\n", v_head, size_tmp_candidate_queue);//test } // Function for distance query; // traverse vertex v_id's labels; // return the distance between v_id and cand_root_id based on existing labels. // return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label. 
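// Illustration (a hedged sketch, not used by the algorithm): distance_query() below
// is the second, label-based pruning layer (push_labels() already applied the
// bit-parallel filter). A candidate hub `cand` reached at distance `iter` from v
// is redundant iff some already-indexed hub h satisfies
//     dist(v, h) + dist(h, cand) <= iter.
// The function and parameter names here are assumptions for this sketch only; in
// the real code, dist(h, cand) comes from the batch-local buffer
// dist_matrix[cand_root_id][...], which is laid out exactly for this lookup.
template<typename DistT>
inline bool sketch_candidate_is_useful(
        const std::vector<DistT> &v_hub_dists, // dist(v, h) for each indexed hub h of v
        const std::vector<DistT> &cand_hub_dists, // dist(cand, h) for the same hubs
        inti iter)
{
    idi h_bound = v_hub_dists.size();
    for (idi h = 0; h < h_bound; ++h) {
        if (v_hub_dists[h] + cand_hub_dists[h] <= iter) {
            return false; // an existing two-hop path is at least as short
        }
    }
    return true; // no shorter witness exists: keep (cand, iter) as a new label of v
}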
template<inti BATCH_SIZE>
inline bool ParaVertexCentricPLL<BATCH_SIZE>::distance_query(
        idi cand_root_id,
        idi v_id,
        idi roots_start,
        const vector<IndexType> &L,
        const vector<vector<smalli> > &dist_matrix,
        smalli iter)
{
//    ++total_check_count;
//    distance_query_time -= WallTimer::get_time_mark();

    idi cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id];

    // Traverse all of v_id's existing labels
    inti b_i_bound = Lv.batches.size();
    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0);
    for (inti b_i = 0; b_i < b_i_bound; ++b_i) {
        idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
        idi dist_start_index = Lv.batches[b_i].start_index;
        idi dist_bound_index = dist_start_index + Lv.batches[b_i].size;
        // Traverse dist_matrix
        for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
            inti dist = Lv.distances[dist_i].dist;
            if (dist >= iter) {
                // Within a batch, the labels' distances are ordered increasingly.
                // If the half-path distance is already no smaller than the targeted distance, jump to the next batch.
                break;
            }
            idi v_start_index = Lv.distances[dist_i].start_index;
            idi v_bound_index = v_start_index + Lv.distances[dist_i].size;
            for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                idi v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
                if (v >= cand_real_id) {
                    // Vertex cand_real_id cannot have labels whose ranks are lower than its own,
                    // in which case dist_matrix[cand_root_id][v] does not exist.
                    continue;
                }
                inti d_tmp = dist + dist_matrix[cand_root_id][v];
                if (d_tmp <= iter) {
//                    distance_query_time += WallTimer::get_time_mark();
//                    ++normal_hit_count;
                    return false;
                }
            }
        }
    }
//    distance_query_time += WallTimer::get_time_mark();
    return true;
}

// Function inserts candidate cand_root_id into vertex v_id's labels;
// updates the distance buffer dist_matrix if v_id is a root of this batch;
// it only updates v_id's labels' vertices array.
template<inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::insert_label_only(
        idi cand_root_id,
        idi v_id,
        idi roots_start,
        inti roots_size,
        vector<IndexType> &L,
        vector<vector<smalli> > &dist_matrix,
        smalli iter)
{
    L[v_id].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root in this batch
    idi v_root_id = v_id - roots_start;
    if (v_id >= roots_start && v_root_id < roots_size) {
        dist_matrix[v_root_id][cand_root_id + roots_start] = iter;
    }
}

// Function updates those index arrays in v_id's label only if v_id has been inserted new labels
template<inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::update_label_indices(
        idi v_id,
        idi inserted_count,
        vector<IndexType> &L,
        vector<ShortIndex> &short_index,
        idi b_id,
        smalli iter)
{
    IndexType &Lv = L[v_id];
    // indicator[BATCH_SIZE] is true, meaning v already got some labels in this batch
    if (short_index[v_id].indicator[BATCH_SIZE]) {
        // Increase the last batch element's size because a new distance element needs to be added
        ++(Lv.batches.rbegin()->size);
    } else {
        short_index[v_id].indicator[BATCH_SIZE] = 1;
        // Insert a new Batch with batch_id, start_index, and size because a new distance element needs to be added
        Lv.batches.push_back(IndexType::Batch(
                b_id,
                Lv.distances.size(),
                1));
    }
    // Insert a new distance element with start_index, size, and dist
    Lv.distances.push_back(IndexType::DistanceIndexType(
            Lv.vertices.size() - inserted_count,
inserted_count, iter)); } // Function to reset dist_matrix the distance buffer to INF // Traverse every root's labels to reset its distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix) { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; // } // } // } // } } template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi 
&end_candidate_queue, vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated) //inline void ParaVertexCentricPLL::batch_process( // const Graph &G, // idi b_id, // idi roots_start, // start id of roots // inti roots_size, // how many roots in the batch // vector<IndexType> &L, // const vector<bool> &used_bp_roots) { // initializing_time -= WallTimer::get_time_mark(); // static const idi num_v = G.get_num_v(); // static vector<idi> active_queue(num_v); // static idi end_active_queue = 0; // static vector<idi> candidate_queue(num_v); // static idi end_candidate_queue = 0; // static vector<ShortIndex> short_index(num_v); // static vector< vector<smalli> > dist_matrix(roots_size, vector<smalli>(num_v, SMALLI_MAX)); // static uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // static uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // static vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. // static idi end_once_candidated_queue = 0; // static uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // At the beginning of a batch, initialize the labels L and distance buffer dist_matrix; // printf("initializing...\n");//test initialize( short_index, dist_matrix, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, b_id, roots_start, roots_size, L, used_bp_roots); smalli iter = 0; // The iterator, also the distance for current iteration // initializing_time += WallTimer::get_time_mark(); {//test // now_short_index.assign(short_index.begin(), short_index.end()); } while (0 != end_active_queue) { // candidating_time -= WallTimer::get_time_mark(); ++iter; {//test // tmp_short_index.swap(now_short_index); } // Pushing // printf("pushing...\n");//test { // Prepare for parallel processing the active_queue and adding to candidate_queue. // Every vertex's offset location in tmp_candidate_queue // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_active_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. offsets_tmp_queue[i_queue] = G.out_degrees[active_queue[i_queue]]; } idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_candidate_queue(num_neighbors); // A vector to store the true number of pushed neighbors of every active vertex. vector<idi> sizes_tmp_candidate_queue(end_active_queue, 0); // similarly, every thread writes to tmp_once_candidated_queue at its offset location vector<idi> tmp_once_candidated_queue(num_neighbors); // And store the true number of new added once-candidated vertices. 
vector<idi> sizes_tmp_once_candidated_queue(end_active_queue, 0); // Traverse active vertices to push their labels as candidates // schedule dynamic is slower #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { idi v_head = active_queue[i_queue]; is_active[v_head] = 0; // reset is_active push_labels( v_head, roots_start, G, L, short_index, // candidate_queue, // end_candidate_queue, tmp_candidate_queue, sizes_tmp_candidate_queue[i_queue], offsets_tmp_queue[i_queue], got_candidates, // once_candidated_queue, // end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_queue], once_candidated, used_bp_roots, iter); } {//test // now_short_index.assign(short_index.begin(), short_index.end()); } // According to sizes_tmp_candidate_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_candidate_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_candidate_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_candidate_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue candidate_queue, end_candidate_queue); // Get the offset for inserting to the real queue. total_new = prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); // Collect all once-candidated vertices from tmp_once_candidated_queue into once_candidated_queue collect_into_queue( tmp_once_candidated_queue, offsets_tmp_queue, sizes_tmp_once_candidated_queue, total_new, once_candidated_queue, end_once_candidated_queue); // printf("end_candidate_queue: %u\n", end_candidate_queue); fflush(stdout);//test end_active_queue = 0; // Set the active_queue empty } // candidating_time += WallTimer::get_time_mark(); if (end_candidate_queue == 0) { break; } // adding_time -= WallTimer::get_time_mark(); // Adding // printf("adding...\n");//test { ////////////////////////////////////////////////////////////////////////////////// // OpenMP Version // Prepare for parallel processing the candidate_queue and adding to active_queue. // Every vertex's offset location in tmp_active_queue is i_queue * roots_size // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_candidate_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. // A ridiculous bug here. The v_id will, if any, only add itself to the active queue. //offsets_tmp_queue[i_queue] = i_queue * roots_size; offsets_tmp_queue[i_queue] = i_queue; } // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_active_queue(end_candidate_queue); // A vector to store the true number of pushed neighbors of every active vertex. 
vector<idi> sizes_tmp_active_queue(end_candidate_queue, 0); // Traverse vertices in the candidate_queue to insert labels // Here schedule dynamic will be slower //#ifdef PROFILE // cache_miss.measure_start(); //#endif #pragma omp parallel for schedule(dynamic) for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { //#ifdef PROFILE // inti tid = omp_get_thread_num(); // thds_adding_time[tid] -= WallTimer::get_time_mark(); //#endif idi v_id = candidate_queue[i_queue]; inti inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id] = 0; // reset got_candidates inti bound_cand_i = short_index[v_id].end_candidates_que; for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { inti cand_root_id = short_index[v_id].candidates_que[cand_i]; short_index[v_id].is_candidate[cand_root_id] = 0; // Reset is_candidate // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id, roots_start, L, dist_matrix, iter)) { if (!is_active[v_id]) { is_active[v_id] = 1; tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id; } // if (!be_active) { // be_active = true; // } // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only( cand_root_id, v_id, roots_start, roots_size, L, dist_matrix, iter); } } short_index[v_id].end_candidates_que = 0; // if (be_active) { // if (CAS(&is_active[v_id], (uint8_t) 0, (uint8_t) 1)) { // tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id; // } // } if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id, inserted_count, L, short_index, b_id, iter); } } // According to sizes_tmp_active_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_active_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_active_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_active_queue, // the locations in queue for writing into queue. 
total_new, // total number of elements which need to be added from tmp_queue to queue active_queue, end_active_queue); end_candidate_queue = 0; // Set the candidate_queue empty ////////////////////////////////////////////////////////////////////////////////// ////// Sequential version // for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // idi v_id = candidate_queue[i_queue]; // inti inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id] = false; // reset got_candidates // // Traverse v_id's all candidates // inti bound_cand_i = short_index[v_id].end_candidates_que; // for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // inti cand_root_id = short_index[v_id].candidates_que[cand_i]; // short_index[v_id].is_candidate[cand_root_id] = false; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id, // roots_start, // L, // dist_matrix, // iter) ) { // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id, // roots_start, // roots_size, // L, // dist_matrix, // iter); // } // } // short_index[v_id].end_candidates_que = 0; //// } // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id, // inserted_count, // L, // short_index, // b_id, // iter); // } // } // end_candidate_queue = 0; // Set the candidate_queue empty ////////////////////////////////////////////////////////////////////////////////////// } // adding_time += WallTimer::get_time_mark(); } // Reset the dist_matrix // initializing_time -= WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); reset_at_end( roots_start, roots_size, L, dist_matrix); // init_dist_matrix_time += WallTimer::get_time_mark(); // initializing_time += WallTimer::get_time_mark(); // double total_time = time_can + time_add; // printf("Candidating time: %f (%f%%)\n", time_can, time_can / total_time * 100); // printf("Adding time: %f (%f%%)\n", time_add, time_add / total_time * 100); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::construct(const Graph &G) { // initializing_time -= WallTimer::get_time_mark(); idi num_v = G.get_num_v(); num_v_ = num_v; L.resize(num_v); idi remainer = num_v % BATCH_SIZE; idi b_i_bound = num_v / BATCH_SIZE; // uint8_t *used_bp_roots = (uint8_t *) calloc(num_v, sizeof(uint8_t)); vector<uint8_t> used_bp_roots(num_v, 0); vector<idi> active_queue(num_v); idi end_active_queue = 0; vector<idi> candidate_queue(num_v); idi end_candidate_queue = 0; vector<ShortIndex> short_index(num_v); // vector<ShortIndex> short_index; short_index.resize(num_v); vector<vector<smalli> > dist_matrix(BATCH_SIZE, vector<smalli>(num_v, SMALLI_MAX)); // uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> got_candidates(num_v, 0); vector<uint8_t> is_active(num_v, 0); vector<idi> once_candidated_queue( num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. 
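    // These scratch structures are allocated once here and reused across all
    // batches. A queue paired with a uint8_t flag array (once_candidated_queue
    // with once_candidated, candidate_queue with got_candidates, ...) is this
    // file's recurring dedup idiom: uint8_t admits CAS, which vector<bool> does
    // not, so a vertex is claimed with
    //     CAS(&flag[v], (uint8_t) 0, (uint8_t) 1)
    // and only the winning thread appends v (into a per-thread slice, or via
    // TS_enqueue), guaranteeing each vertex enters a queue at most once per round.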
idi end_once_candidated_queue = 0; // uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> once_candidated(num_v, 0); // initializing_time += WallTimer::get_time_mark(); double time_labeling = -WallTimer::get_time_mark(); //double bp_labeling_time = -WallTimer::get_time_mark(); // printf("BP labeling...\n"); //test bit_parallel_labeling( G, L, used_bp_roots); //bp_labeling_time += WallTimer::get_time_mark(); for (idi b_i = 0; b_i < b_i_bound; ++b_i) { // printf("b_i: %u\n", b_i);//test batch_process( G, b_i, b_i * BATCH_SIZE, BATCH_SIZE, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i, // b_i * BATCH_SIZE, // BATCH_SIZE, // L, // used_bp_roots); } if (remainer != 0) { // printf("b_i: %u the last batch\n", b_i_bound);//test batch_process( G, b_i_bound, b_i_bound * BATCH_SIZE, remainer, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i_bound, // b_i_bound * BATCH_SIZE, // remainer, // L, // used_bp_roots); } time_labeling += WallTimer::get_time_mark(); // free(got_candidates); // free(is_active); // free(once_candidated); // free(used_bp_roots); // Test printf("Threads: %u Batch_size: %u\n", NUM_THREADS, BATCH_SIZE); //printf("BP_labeling: %.2f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); printf("BP_Roots_Size: %u\n", BITPARALLEL_SIZE); // printf("Initializing: %.2f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %.2f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %.2f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("\tdistance_query_time: %f (%f%%)\n", distance_query_time, distance_query_time / adding_time * 100); // printf("\ttotal_check_count: %llu\n", total_check_count); // printf("\tbp_hit_count (to total_check): %llu (%f%%)\n", // bp_hit_count, // bp_hit_count * 100.0 / total_check_count); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); #ifdef PROFILE uint64_t total_thds_adding_count = 0; double total_thds_adding_time = 0; for (inti tid = 0; tid < NUM_THREADS; ++tid) { total_thds_adding_count += thds_adding_count[tid]; total_thds_adding_time += thds_adding_time[tid]; } printf("Threads_adding_count:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %lu(%.2f%%)", thds_adding_count[tid], thds_adding_count[tid] * 100.0 / total_thds_adding_count); } puts(""); printf("Threads_adding_time:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %f(%.2f%%)", 
        thds_adding_time[tid],
        thds_adding_time[tid] * 100.0 / total_thds_adding_time);
    }
    puts("");
    //printf("Threads_adding_average_time:");
    //for (inti tid = 0; tid < NUM_THREADS; ++tid) {
    //    printf(" %f", thds_adding_time[tid] / thds_adding_count[tid]);
    //}
    puts("");
    cache_miss.print();
#endif
    {
        printf("Total_labeling_time: %.2f seconds bp_hit_count: %'lu\n", time_labeling, bp_hit_count);
    }
//    printf("Total_labeling_time: %.2f seconds\n", time_labeling);
    // End test
}

// Function to get the exclusive prefix sum of the elements in offsets; returns the total sum.
template<inti BATCH_SIZE>
inline idi ParaVertexCentricPLL<BATCH_SIZE>::prefix_sum_for_offsets(
        vector<idi> &offsets)
{
    idi size_offsets = offsets.size();
    if (1 == size_offsets) {
        idi tmp = offsets[0];
        offsets[0] = 0;
        return tmp;
    } else if (size_offsets < 2048) {
        idi offset_sum = 0;
        idi size = size_offsets;
        for (idi i = 0; i < size; ++i) {
            idi tmp = offsets[i];
            offsets[i] = offset_sum;
            offset_sum += tmp;
        }
        return offset_sum;
    } else {
        // Parallel prefix sum, based on Guy E. Blelloch's "Prefix Sums and Their Applications".
        idi last_element = offsets[size_offsets - 1];
        idi size = 1 << ((idi) log2(size_offsets)); // the largest power of two not exceeding size_offsets
        idi tmp_element = offsets[size - 1];

        // Up-Sweep (Reduce) Phase
        idi log2size = log2(size);
        for (idi d = 0; d < log2size; ++d) {
            idi by = 1 << (d + 1);
#pragma omp parallel for
            for (idi k = 0; k < size; k += by) {
                offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1];
            }
        }

        // Down-Sweep Phase
        offsets[size - 1] = 0;
        for (idi d = log2(size) - 1; d != (idi) -1; --d) {
            idi by = 1 << (d + 1);
#pragma omp parallel for
            for (idi k = 0; k < size; k += by) {
                idi t = offsets[k + (1 << d) - 1];
                offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1];
                offsets[k + (1 << (d + 1)) - 1] += t;
            }
        }

        // Sequentially scan the tail that does not fit into the power-of-two range.
        if (size != size_offsets) {
            idi tmp_sum = offsets[size - 1] + tmp_element;
            for (idi i = size; i < size_offsets; ++i) {
                idi t = offsets[i];
                offsets[i] = tmp_sum;
                tmp_sum += t;
            }
        }
        return offsets[size_offsets - 1] + last_element;
    }
}

// Collect elements in the tmp_queue into the queue
template<inti BATCH_SIZE>
template<typename T>
inline void ParaVertexCentricPLL<BATCH_SIZE>::collect_into_queue(
        vector<T> &tmp_queue,
        vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for reading from tmp_queue
        vector<idi> &offsets_queue, // the locations in queue for writing into queue
        idi num_elements, // total number of elements which need to be added from tmp_queue to queue
        vector<T> &queue,
        idi &end_queue)
{
    if (0 == num_elements) {
        return;
    }
    idi i_bound = offsets_tmp_queue.size();
#pragma omp parallel for
    for (idi i = 0; i < i_bound; ++i) {
        idi i_q_start = end_queue + offsets_queue[i];
        idi i_q_bound;
        if (i_bound - 1 != i) {
            i_q_bound = end_queue + offsets_queue[i + 1];
        } else {
            i_q_bound = end_queue + num_elements;
        }
        if (i_q_start == i_q_bound) {
            // If the group has no elements to be added, then continue to the next group
            continue;
        }
        idi end_tmp = offsets_tmp_queue[i];
        for (idi i_q = i_q_start; i_q < i_q_bound; ++i_q) {
            queue[i_q] = tmp_queue[end_tmp++];
        }
    }
    end_queue += num_elements;
}

// Function: thread-safe enqueue. The queue already has enough capacity; an index points to the end of the queue.
template<inti BATCH_SIZE>
template<typename T, typename Int>
inline void ParaVertexCentricPLL<BATCH_SIZE>::TS_enqueue(
        vector<T> &queue,
        Int &end_queue,
        const T &e)
{
    volatile Int old_i = end_queue;
    volatile Int new_i = old_i + 1;
    while (!CAS(&end_queue, old_i, new_i)) {
        old_i = end_queue;
        new_i = old_i + 1;
    }
    queue[old_i] = e;
}

template<inti BATCH_SIZE>
void ParaVertexCentricPLL<BATCH_SIZE>::store_index_to_file(
        const char *filename,
        const vector<idi> &rank)
{
    // TODO: fout commented out
//    std::ofstream fout(filename);
//    if (!fout.is_open()) {
//        fprintf(stderr, "Error: cannot open file %s\n", filename);
//        exit(EXIT_FAILURE);
//    }
//    std::string txt_filename = std::string(filename) + ".txt"; //test
//    std::ofstream txt_out(txt_filename.c_str());
    // Store into file the number of vertices and the number of bit-parallel roots.
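    // The (currently commented-out) writes in this function mirror what
    // load_index_from_file() expects to read back, so the implied binary layout is:
    //     num_v_, BITPARALLEL_SIZE
    //     then, per vertex: BITPARALLEL_SIZE x (bp_dist, bp_sets[0], bp_sets[1]);
    //                       size_labels, then size_labels x (label id, distance)
    //                       pairs sorted by label id.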
uint64_t labels_count = 0; // fout.write((char *) &num_v_, sizeof(num_v_)); // fout.write((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); for (idi v_id = 0; v_id < num_v_; ++v_id) { idi v_rank = rank[v_id]; const IndexType &Lv = L[v_rank]; idi size_labels = Lv.vertices.size(); labels_count += size_labels; // // Store Bit-parallel Labels into file. // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // weighti d = Lv.bp_dist[b_i]; // uint64_t s0 = Lv.bp_sets[b_i][0]; // uint64_t s1 = Lv.bp_sets[b_i][1]; // fout.write((char *) &d, sizeof(d)); // fout.write((char *) &s0, sizeof(s0)); // fout.write((char *) &s1, sizeof(s1)); // } vector<std::pair<idi, weighti> > ordered_labels; // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; weighti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; ordered_labels.push_back(std::make_pair(tail, dist)); } } } // Sort sort(ordered_labels.begin(), ordered_labels.end()); // // Store into file // fout.write((char *) &size_labels, sizeof(size_labels)); for (idi l_i = 0; l_i < size_labels; ++l_i) { idi l = ordered_labels[l_i].first; weighti d = ordered_labels[l_i].second; // fout.write((char *) &l, sizeof(l)); // fout.write((char *) &d, sizeof(d)); // {//test // txt_out << v_id << " " << v_rank << ": " << l << " " << (idi) d << std::endl; // } } } printf("Label_size: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v_); // fout.close(); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::load_index_from_file( const char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi num_v; // Load from file the number of vertices and the number of bit-parallel roots. fin.read((char *) &num_v, sizeof(num_v)); fin.read((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); num_v_ = num_v; Index.resize(num_v); uint64_t labels_count = 0; // Load labels for every vertex for (idi v_id = 0; v_id < num_v; ++v_id) { IndexOrdered &Iv = Index[v_id]; // Load Bit-parallel Labels from file. for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { fin.read((char *) &Iv.bp_dist[b_i], sizeof(Iv.bp_dist[b_i])); fin.read((char *) &Iv.bp_sets[b_i][0], sizeof(Iv.bp_sets[b_i][0])); fin.read((char *) &Iv.bp_sets[b_i][1], sizeof(Iv.bp_sets[b_i][1])); } // Normal Labels // Load Labels from file. 
idi size_labels; fin.read((char *) &size_labels, sizeof(size_labels)); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); Iv.label_dists.resize(size_labels + 1); for (idi l_i = 0; l_i < size_labels; ++l_i) { fin.read((char *) &Iv.label_id[l_i], sizeof(Iv.label_id[l_i])); fin.read((char *) &Iv.label_dists[l_i], sizeof(Iv.label_dists[l_i])); } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = (weighti) -1; // Sentinel } printf("Label_size_loaded: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); fin.close(); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::order_labels( const vector<idi> &rank2id, const vector<idi> &rank) { idi num_v = rank.size(); vector<vector<pair < idi, weighti> > > ordered_L(num_v); idi labels_count = 0; Index.resize(num_v); // Traverse the L, put them into Index (ordered labels) for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; IndexOrdered &Iv = Index[new_v]; const IndexType &Lv = L[v_id]; auto &OLv = ordered_L[new_v]; // Bit-parallel Labels memcpy(&Iv.bp_dist, &Lv.bp_dist, BITPARALLEL_SIZE * sizeof(weighti)); for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { memcpy(&Iv.bp_sets[b_i], &Lv.bp_sets[b_i], 2 * sizeof(uint64_t)); } // Normal Labels // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; inti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; // idi new_tail = rank2id[tail]; // new_L[new_v].push_back(make_pair(new_tail, dist)); OLv.push_back(std::make_pair(tail, dist)); } } } // Sort sort(OLv.begin(), OLv.end()); // Store into Index inti size_labels = OLv.size(); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); // Adding one for Sentinel Iv.label_dists.resize(size_labels + 1); // Adding one for Sentinel for (inti l_i = 0; l_i < size_labels; ++l_i) { Iv.label_id[l_i] = OLv[l_i].first; Iv.label_dists[l_i] = OLv[l_i].second; } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = WEIGHTI_MAX; // Sentinel } printf("Label_size: %u mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); // // Test // { // puts("Asserting..."); // for (idi v_id = 0; v_id < num_v; ++v_id) { // const IndexType &Lv = L[v_id]; // const IndexOrdered &Iv = Index[rank2id[v_id]]; // // Bit-parallel Labels // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // assert(Lv.bp_dist[b_i] == Iv.bp_dist[b_i]); // assert(Lv.bp_sets[b_i][0] == Iv.bp_sets[b_i][0]); // assert(Lv.bp_sets[b_i][1] == Iv.bp_sets[b_i][1]); // } // // Normal Labels // assert(Lv.vertices.size() == Iv.label_id.size()); // assert(Lv.vertices.size() == Iv.label_dists.size()); //// { //// inti bound_i = Iv.label_id.size() > 10 ? 
10 : Iv.label_id.size(); //// printf("V %u:", rank2id[v_id]); //// for (inti i = 0; i < bound_i; ++i) { //// printf(" (%u, %u)", Iv.label_id[i], Iv.label_dists[i]); //// } //// puts(""); //// } // // } // puts("Asserted."); // } } template<inti BATCH_SIZE> weighti ParaVertexCentricPLL<BATCH_SIZE>::query_distance( idi a, idi b) { idi num_v = num_v_; if (a >= num_v || b >= num_v) { return a == b ? 0 : WEIGHTI_MAX; } // // A is shorter than B // IndexOrdered &Ia = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[b] : Index[a]; // // A is longer than B // IndexOrdered &Ia = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[b] : Index[a]; IndexOrdered &Ia = Index[a]; IndexOrdered &Ib = Index[b]; // const IndexOrdered &Ia = Index[a]; // const IndexOrdered &Ib = Index[b]; inti d = WEIGHTI_MAX; _mm_prefetch(&Ia.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ia.label_dists[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_dists[0], _MM_HINT_T0); // Bit-Parallel Labels for (int i = 0; i < BITPARALLEL_SIZE; ++i) { int td = Ia.bp_dist[i] + Ib.bp_dist[i]; if (td - 2 <= d) { td += (Ia.bp_sets[i][0] & Ib.bp_sets[i][0]) ? -2 : ((Ia.bp_sets[i][0] & Ib.bp_sets[i][1]) | (Ia.bp_sets[i][1] & Ib.bp_sets[i][0])) ? -1 : 0; if (td < d) { d = td; } } } // Normal Labels (ordered) // // Vectorizaed Version // vector<idi> &A = Ia.label_id; // vector<idi> &B = Ib.label_id; // idi len_B = B.size() - 1; //// idi len_B = B.size(); // idi bound_b_base_i = len_B - (len_B % NUM_P_INT); // idi a_i = 0; // idi b_base_i = 0; // idi len_A = A.size() - 1; //// idi len_A = A.size(); // ++length_larger_than_16.second; // if (len_B >= 16) { // ++length_larger_than_16.first; // } // while (a_i < len_A && b_base_i < bound_b_base_i) { // int a = A[a_i]; // __m512i a_v = _mm512_set1_epi32(a); // // // Packed b // __m512i b_v = _mm512_loadu_epi32(&B[b_base_i]); // @suppress("Function cannot be resolved") // __mmask16 is_equal_m = _mm512_cmpeq_epi32_mask(a_v, b_v); // if (is_equal_m) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i + (idi) (log2(is_equal_m))]; // if (td < d) { // d = td; // } // // // Advance index // if (is_equal_m & (__mmask16) 0x8000) { // ++a_i; // b_base_i += NUM_P_INT; // } else { // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } else { // // Advance index // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } // while (a_i < len_A && b_base_i < len_B) { // if (A[a_i] == B[b_base_i]) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i]; // if (td < d) { // d = td; // } // // // Advance index // ++a_i; // ++b_base_i; // } else { // // Advance index // a_i += (A[a_i] < B[b_base_i]) ? 1 : 0; // b_base_i += (B[b_base_i] < A[a_i]) ? 1 : 0; // } // } // Sequential Version for (idi i1 = 0, i2 = 0;;) { idi v1 = Ia.label_id[i1], v2 = Ib.label_id[i2]; if (v1 == v2) { if (v1 == num_v) { break; // Sentinel } inti td = Ia.label_dists[i1] + Ib.label_dists[i2]; if (td < d) { d = td; } ++i1; ++i2; } else { i1 += v1 < v2 ? 1 : 0; i2 += v1 > v2 ? 
1 : 0; } } if (d >= WEIGHTI_MAX - 2) { d = WEIGHTI_MAX; } return d; } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::switch_labels_to_old_id( const vector<idi> &rank2id, const vector<idi> &rank) { idi label_sum = 0; idi test_label_sum = 0; // idi num_v = rank2id.size(); idi num_v = rank.size(); vector<vector<pair < idi, weighti> > > new_L(num_v); // for (idi r = 0; r < num_v; ++r) { // idi v = rank2id[r]; // const IndexType &Lr = L[r]; // IndexType &Lv = new_L[v]; // idi size = Lr.get_size(); // label_sum += size; // for (idi li = 0; li < size; ++li) { // idi l = Lr.get_label_ith_v(li); // idi new_l = rank2id[l]; // Lv.add_label_seq(new_l, Lr.get_label_ith_d(li)); // } // } // L = new_L; for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; const IndexType &Lv = L[v_id]; // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { label_sum += Lv.distances[dist_i].size; idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; inti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; // idi new_tail = rank2id[tail]; // new_L[new_v].push_back(make_pair(new_tail, dist)); new_L[new_v].push_back(std::make_pair(tail, dist)); ++test_label_sum; } } } } printf("Label sum: %u %u mean: %f\n", label_sum, test_label_sum, label_sum * 1.0 / num_v); // // Try to print // for (idi v = 0; v < num_v; ++v) { // const auto &Lv = new_L[v]; // idi size = Lv.size(); // printf("Vertex %u (Size %u):", v, size); // for (idi i = 0; i < size; ++i) { // printf(" (%u, %d)", Lv[i].first, Lv[i].second); // fflush(stdout); // } // puts(""); // } // // Try query // idi u; // idi v; // while (std::cin >> u >> v) { // weighti dist = WEIGHTI_MAX; // // Bit Parallel Check // const IndexType &idx_u = L[rank[u]]; // const IndexType &idx_v = L[rank[v]]; // // for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { // int td = idx_v.bp_dist[i] + idx_u.bp_dist[i]; // if (td - 2 <= dist) { // td += // (idx_v.bp_sets[i][0] & idx_u.bp_sets[i][0]) ? -2 : // ((idx_v.bp_sets[i][0] & idx_u.bp_sets[i][1]) // | (idx_v.bp_sets[i][1] & idx_u.bp_sets[i][0])) // ? -1 : 0; // if (td < dist) { // dist = td; // } // } // } // // // Normal Index Check // const auto &Lu = new_L[u]; // const auto &Lv = new_L[v]; //// unsorted_map<idi, weighti> markers; // map<idi, weighti> markers; // for (idi i = 0; i < Lu.size(); ++i) { // markers[Lu[i].first] = Lu[i].second; // } // for (idi i = 0; i < Lv.size(); ++i) { // const auto &tmp_l = markers.find(Lv[i].first); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + Lv[i].second; // if (d < dist) { // dist = d; // } // } // if (dist == 255) { // printf("2147483647\n"); // } else { // printf("%u\n", dist); // } // } } } #endif /* INCLUDES_PADO_H_ */
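/*
 * A minimal standalone sketch of the work-efficient parallel scan that
 * prefix_sum_for_offsets() above is built on (Blelloch's up-sweep /
 * down-sweep exclusive prefix sum). It assumes n is a power of two; the
 * function above additionally pads or falls back to a sequential pass
 * for other sizes. The name blelloch_scan and the plain long element
 * type are illustrative, not part of the codebase above.
 */
#include <stddef.h>

static long blelloch_scan(long *a, size_t n)
{
    size_t d, k;
    long total;
    /* Up-sweep (reduce): each pass halves the number of partial sums. */
    for (d = 1; d < n; d <<= 1) {
        #pragma omp parallel for
        for (k = 0; k < n; k += 2 * d)
            a[k + 2 * d - 1] += a[k + d - 1];
    }
    total = a[n - 1];       /* grand total ends up in the last slot */
    /* Down-sweep: distribute prefixes back down the implicit tree. */
    a[n - 1] = 0;
    for (d = n >> 1; d >= 1; d >>= 1) {
        #pragma omp parallel for
        for (k = 0; k < n; k += 2 * d) {
            long t = a[k + d - 1];
            a[k + d - 1] = a[k + 2 * d - 1];
            a[k + 2 * d - 1] += t;
        }
    }
    return total;           /* a[] now holds the exclusive prefix sum */
}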
GB_serialize_array.c
//------------------------------------------------------------------------------ // GB_serialize_array: serialize an array, with optional compression //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Parallel compression method for an array. The array is compressed into // a sequence of independently allocated blocks, or returned as-is if not // compressed. Currently, only LZ4 is supported. #include "GB.h" #include "GB_serialize.h" #include "GB_lz4.h" #define GB_FREE_ALL \ { \ GB_FREE (&Sblocks, Sblocks_size) ; \ GB_serialize_free_blocks (&Blocks, Blocks_size, nblocks, Context) ; \ } GrB_Info GB_serialize_array ( // output: GB_blocks **Blocks_handle, // Blocks: array of size nblocks+1 size_t *Blocks_size_handle, // size of Blocks int64_t **Sblocks_handle, // Sblocks: array of size nblocks+1 size_t *Sblocks_size_handle, // size of Sblocks int32_t *nblocks_handle, // # of blocks int32_t *method_used, // method used size_t *compressed_size, // size of compressed block, or upper // bound if dryrun is true // input: bool dryrun, // if true, just esimate the size GB_void *X, // input array of size len int64_t len, // size of X, in bytes int32_t method, // compression method requested bool intel, // if true, use Intel IPPS int32_t algo, // compression algorithm int32_t level, // compression level GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; ASSERT (Blocks_handle != NULL) ; ASSERT (Blocks_size_handle != NULL) ; ASSERT (Sblocks_handle != NULL) ; ASSERT (Sblocks_size_handle != NULL) ; ASSERT (nblocks_handle != NULL) ; ASSERT (method_used != NULL) ; ASSERT (compressed_size != NULL) ; GB_blocks *Blocks = NULL ; size_t Blocks_size = 0, Sblocks_size = 0 ; int32_t nblocks = 0 ; int64_t *Sblocks = NULL ; //-------------------------------------------------------------------------- // check for quick return //-------------------------------------------------------------------------- (*Blocks_handle) = NULL ; (*Blocks_size_handle) = 0 ; (*Sblocks_handle) = NULL ; (*Sblocks_size_handle) = 0 ; (*nblocks_handle) = 0 ; (*method_used) = GxB_COMPRESSION_NONE ; (*compressed_size) = 0 ; if (X == NULL || len == 0) { // input array is empty return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // check for no compression //-------------------------------------------------------------------------- if (method <= GxB_COMPRESSION_NONE || len < 256) { // no compression, return result as a single block (plus the sentinel) if (!dryrun) { Blocks = GB_MALLOC (2, GB_blocks, &Blocks_size) ; Sblocks = GB_MALLOC (2, int64_t, &Sblocks_size) ; if (Blocks == NULL || Sblocks == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } Blocks [0].p = X ; // first block is all of the array X Blocks [0].p_size = 0 ; // denotes that p is a shallow copy of X Sblocks [0] = 0 ; // start of first block Blocks [1].p = NULL ; // 2nd block is the final sentinel Blocks [1].p_size = 0 ; Sblocks [1] = len ; // first block ends at len-1 (*Blocks_handle) = Blocks ; (*Blocks_size_handle) = Blocks_size ; (*Sblocks_handle) = Sblocks ; (*Sblocks_size_handle) = Sblocks_size ; } (*compressed_size) = len ; 
(*nblocks_handle) = 1 ; return (GrB_SUCCESS) ; } (*method_used) = method ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (len, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // determine # of blocks and allocate them //-------------------------------------------------------------------------- // divide the array into blocks, 4 per thread, or a single block if 1 thread int64_t blocksize = (nthreads == 1) ? len : GB_ICEIL (len, 4*nthreads) ; // ensure the blocksize does not exceed the LZ4 maximum ASSERT (LZ4_MAX_INPUT_SIZE < INT32_MAX) ; blocksize = GB_IMIN (blocksize, LZ4_MAX_INPUT_SIZE/2) ; // ensure the blocksize is not too small blocksize = GB_IMAX (blocksize, (64*1024)) ; // determine the final # of blocks nblocks = GB_ICEIL (len, blocksize) ; nthreads = GB_IMIN (nthreads, nblocks) ; (*nblocks_handle) = nblocks ; // allocate the output Blocks: one per block plus the sentinel block if (!dryrun) { Blocks = GB_CALLOC (nblocks+1, GB_blocks, &Blocks_size) ; Sblocks = GB_CALLOC (nblocks+1, int64_t, &Sblocks_size) ; if (Blocks == NULL || Sblocks == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } } // allocate the blocks, one at a time int32_t blockid ; bool ok = true ; for (blockid = 0 ; blockid < nblocks && ok ; blockid++) { // allocate a single block for the compression of X [kstart:kend-1] int64_t kstart, kend ; GB_PARTITION (kstart, kend, len, blockid, nblocks) ; size_t uncompressed = kend - kstart ; ASSERT (uncompressed < INT32_MAX) ; ASSERT (uncompressed > 0) ; size_t s = (size_t) LZ4_compressBound ((int) uncompressed) ; ASSERT (s < INT32_MAX) ; size_t p_size = 0 ; if (dryrun) { // do not allocate the block; just sum up the upper bound sizes (*compressed_size) += s ; } else { // allocate the block GB_void *p = GB_MALLOC (s, GB_void, &p_size) ; ok = (p != NULL) ; Blocks [blockid].p = p ; Blocks [blockid].p_size = p_size ; } } if (dryrun) { // GrB_Matrix_serializeSize: no more work to do. (*compressed_size) is // an upper bound of the blob_size required when the matrix is // compressed, and (*nblocks_handle) is the number of blocks to be used. // No space has been allocated. 
return (GrB_SUCCESS) ; } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // compress the blocks in parallel //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic) \ reduction(&&:ok) for (blockid = 0 ; blockid < nblocks ; blockid++) { // compress X [kstart:kend-1] into Blocks [blockid].p int64_t kstart, kend ; GB_PARTITION (kstart, kend, len, blockid, nblocks) ; const char *src = (const char *) (X + kstart) ; // source char *dst = (char *) Blocks [blockid].p ; // destination int srcSize = (int) (kend - kstart) ; // size of source size_t dsize = Blocks [blockid].p_size ; // size of dest int dstCapacity = GB_IMIN (dsize, INT32_MAX) ; int s ; switch (algo) { default : case GxB_COMPRESSION_LZ4 : s = LZ4_compress_default (src, dst, srcSize, dstCapacity) ; break ; case GxB_COMPRESSION_LZ4HC : s = LZ4_compress_HC (src, dst, srcSize, dstCapacity, level) ; break ; } ok = ok && (s > 0) ; // compressed block is now in dst [0:s-1], of size s Sblocks [blockid] = (int64_t) s ; } if (!ok) { // compression failure: this can "never" occur GB_FREE_ALL ; return (GrB_INVALID_OBJECT) ; // TODO: find a better error code } //-------------------------------------------------------------------------- // compute cumulative sum of the compressed blocks //-------------------------------------------------------------------------- GB_cumsum (Sblocks, nblocks, NULL, 1, Context) ; //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- (*Blocks_handle) = Blocks ; (*Blocks_size_handle) = Blocks_size ; (*Sblocks_handle) = Sblocks ; (*Sblocks_size_handle) = Sblocks_size ; (*compressed_size) = Sblocks [nblocks] ; // actual size of the blob return (GrB_SUCCESS) ; }
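/*
 * A condensed sketch of the blocking scheme used above, written against
 * the public LZ4 API only: partition the input, compress each block
 * independently in parallel, and record per-block compressed sizes so a
 * later prefix sum (GB_cumsum above) can turn them into offsets.
 * compress_blocks and the inline partitioning arithmetic are
 * illustrative stand-ins for GB_PARTITION and the GB_* allocators.
 */
#include <stdlib.h>
#include "lz4.h"

static int compress_blocks(const char *src, size_t len, int nblocks,
                           char **blocks, int *csizes)
{
    int ok = 1;
    int b;
    #pragma omp parallel for schedule(dynamic) reduction(&&:ok)
    for (b = 0; b < nblocks; b++) {
        /* block b covers [kstart, kend), same idea as GB_PARTITION */
        size_t kstart = (len * (size_t) b) / nblocks;
        size_t kend   = (len * (size_t) (b + 1)) / nblocks;
        int srcSize = (int) (kend - kstart);
        int cap = LZ4_compressBound(srcSize);   /* worst-case output size */
        char *dst = malloc((size_t) cap);
        int s = dst ? LZ4_compress_default(src + kstart, dst, srcSize, cap)
                    : 0;
        blocks[b] = dst;
        csizes[b] = s;      /* s == 0 signals a failed block */
        ok = ok && (s > 0);
    }
    return ok;              /* caller frees blocks[] on failure */
}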
BenchUtils.h
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <chrono> #include <functional> #include <vector> #include <immintrin.h> #ifdef _OPENMP #include <omp.h> #endif #include "./AlignedVec.h" namespace fbgemm { template <typename T> void randFill(aligned_vector<T>& vec, T low, T high); aligned_vector<float> getRandomSparseVector( unsigned size, float fractionNonZeros = 1.0); void llc_flush(std::vector<char>& llc); // Same as omp_get_max_threads() when OpenMP is available, otherwise 1 int fbgemm_get_max_threads(); // Same as omp_get_num_threads() when OpenMP is available, otherwise 1 int fbgemm_get_num_threads(); // Same as omp_get_thread_num() when OpenMP is available, otherwise 0 int fbgemm_get_thread_num(); template <typename T> void cache_evict(const T& vec) { auto const size = vec.size(); auto const elemSize = sizeof(typename T::value_type); auto const dataSize = size * elemSize; const char* data = reinterpret_cast<const char*>(vec.data()); constexpr int CACHE_LINE_SIZE = 64; for (std::size_t i = 0; i < dataSize; i += CACHE_LINE_SIZE) { _mm_clflush(&data[i]); } } /** * Parse application command line arguments * */ int parseArgumentInt( int argc, const char* argv[], const char* arg, int non_exist_val, int def_val); bool parseArgumentBool( int argc, const char* argv[], const char* arg, bool def_val); namespace { struct empty_flush { void operator()() const {} }; } // namespace /** * @param Fn functor to execute * @param Fe data eviction functor */ template <class Fn, class Fe = std::function<void()>> double measureWithWarmup( Fn&& fn, int warmupIterations, int measuredIterations, const Fe& fe = empty_flush(), bool useOpenMP = false) { for (int i = 0; i < warmupIterations; ++i) { // Evict data first fe(); fn(); } double ttot = 0.0; #ifdef _OPENMP #pragma omp parallel if (useOpenMP) #endif for (int i = 0; i < measuredIterations; ++i) { int thread_id = 0; std::chrono::time_point<std::chrono::high_resolution_clock> start, end; #ifdef _OPENMP if (useOpenMP) { thread_id = omp_get_thread_num(); } #endif if (thread_id == 0) { fe(); } #ifdef _OPENMP if (useOpenMP) { #pragma omp barrier } #endif start = std::chrono::high_resolution_clock::now(); fn(); end = std::chrono::high_resolution_clock::now(); auto dur = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start); if (thread_id == 0) { // TODO: measure load imbalance ttot += dur.count(); } } return ttot / 1e9 / measuredIterations; } } // namespace fbgemm
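/*
 * A plain-C sketch of the measureWithWarmup() discipline above: a few
 * untimed warm-up runs, then timed iterations with the eviction hook
 * invoked before each one so the kernel never benefits from a warm
 * cache. run() and evict() are placeholders for the Fn/Fe functors, and
 * omp_get_wtime() stands in for std::chrono; none of these names exist
 * in the header above.
 */
#include <omp.h>

static double measure(void (*run)(void), void (*evict)(void),
                      int warmup, int iters)
{
    double ttot = 0.0;
    for (int i = 0; i < warmup; i++) {
        evict();            /* evict data first, as above */
        run();
    }
    for (int i = 0; i < iters; i++) {
        evict();            /* start each measured run cold */
        double t0 = omp_get_wtime();
        run();
        ttot += omp_get_wtime() - t0;
    }
    return ttot / iters;    /* mean seconds per iteration */
}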
main.c
#include <assert.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/time.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #ifndef min #define min(a, b) (((a) < (b)) ? (a) : (b)) #endif #define min3(x, y, z) min(min(x, y), z) #ifndef max #define max(a, b) (((a) > (b)) ? (a) : (b)) #endif #define max3(x, y, z) max(max(x, y), z) #define CLAMP2BYTE(v) (((unsigned) (v)) < 255 ? (v) : (v < 0) ? 0 : 255) unsigned int detect(uint8_t *pixel, uint8_t **plane, int width, int height, int channels) { int stride = width * channels; int last_col = width * channels - channels; int last_row = height * stride - stride; unsigned int sum = 0; for (int y = 0; y < height; y++) { int cur_row = stride * y; int next_row = min(cur_row + stride, last_row); uint8_t *next_scanline = pixel + next_row; uint8_t *cur_scanline = pixel + cur_row; for (int x = 0; x < width; x++) { int cur_col = x * channels; int next_col = min(cur_col + channels, last_col); uint8_t *c00 = cur_scanline + cur_col; uint8_t *c10 = cur_scanline + next_col; uint8_t *c01 = next_scanline + cur_col; uint8_t *c11 = next_scanline + next_col; int r_avg = ((c00[0] + c10[0] + c01[0] + c11[0])) >> 2; int g_avg = ((c00[1] + c10[1] + c01[1] + c11[1])) >> 2; int b_avg = ((c00[2] + c10[2] + c01[2] + c11[2])) >> 2; #if OPT_PLANE /* clang-format off */ plane[0][y*width + x] = c00[0]; plane[1][y*width + x] = c00[1]; plane[2][y*width + x] = c00[2]; /* clang-format on */ #endif /* TODO: detect appropriate RGB values */ if (r_avg >= 60 && g_avg >= 40 && b_avg >= 20 && r_avg >= b_avg && (r_avg - g_avg) >= 10) if (max3(r_avg, g_avg, b_avg) - min3(r_avg, g_avg, b_avg) >= 10) sum++; } } return sum; } void compute_offset(int *out, int len, int left, int right, int step) { assert(out); assert((len >= 0) && (left >= 0) && (right >= 0)); for (int x = -left; x < len + right; x++) { int pos = x; int len2 = 2 * len; if (pos < 0) { do { pos += len2; } while (pos < 0); } else if (pos >= len2) { do { pos -= len2; } while (pos >= len2); } if (pos >= len) pos = len2 - 1 - pos; out[x + left] = pos * step; } } void denoise(uint8_t *out, uint8_t *in, int *smooth_table, int width, int height, int channels, int radius) { assert(in && out); assert(radius > 0); int window_size = (2 * radius + 1) * (2 * radius + 1); int *col_pow = malloc(width * channels * sizeof(int)); int *col_val = malloc(width * channels * sizeof(int)); int *row_pos = malloc((width + 2 * radius) * channels * sizeof(int)); int *col_pos = malloc((height + 2 * radius) * channels * sizeof(int)); int stride = width * channels; compute_offset(row_pos, width, radius, radius, channels); compute_offset(col_pos, height, radius, radius, stride); int *row_off = row_pos + radius; int *col_off = col_pos + radius; for (int y = 0; y < height; y++) { uint8_t *scan_in_line = in + y * stride; uint8_t *scan_out_line = out + y * stride; if (y == 0) { for (int x = 0; x < stride; x += channels) { int col_sum[3] = {0}; int col_sum_pow[3] = {0}; for (int z = -radius; z <= radius; z++) { uint8_t *sample = in + col_off[z] + x; for (int c = 0; c < channels; ++c) { col_sum[c] += sample[c]; col_sum_pow[c] += sample[c] * sample[c]; } } for (int c = 0; c < channels; ++c) { col_val[x + c] = col_sum[c]; col_pow[x + c] = col_sum_pow[c]; } } } else { uint8_t *last_col = in + col_off[y - radius - 1]; uint8_t *next_col = in + col_off[y + radius]; for (int x = 0; x < stride; x += channels) { for (int c = 0; c < 
channels; ++c) { col_val[x + c] -= last_col[x + c] - next_col[x + c]; col_pow[x + c] -= last_col[x + c] * last_col[x + c] - next_col[x + c] * next_col[x + c]; } } } int prev_sum[3] = {0}, prev_sum_pow[3] = {0}; for (int z = -radius; z <= radius; z++) { int index = row_off[z]; for (int c = 0; c < channels; ++c) { prev_sum[c] += col_val[index + c]; prev_sum_pow[c] += col_pow[index + c]; } } for (int c = 0; c < channels; ++c) { int mean = prev_sum[c] / window_size; int diff = mean - scan_in_line[c]; int edge = CLAMP2BYTE(diff); int masked_edge = (edge * scan_in_line[c] + (256 - edge) * mean) >> 8; int var = (prev_sum_pow[c] - mean * prev_sum[c]) / window_size; int out = masked_edge - diff * var / (var + smooth_table[scan_in_line[c]]); scan_out_line[c] = CLAMP2BYTE(out); } scan_in_line += channels, scan_out_line += channels; for (int x = 1; x < width; x++) { int last_row = row_off[x - radius - 1]; int next_row = row_off[x + radius]; for (int c = 0; c < channels; ++c) { prev_sum[c] -= col_val[last_row + c] - col_val[next_row + c]; prev_sum_pow[c] = prev_sum_pow[c] - col_pow[last_row + c] + col_pow[next_row + c]; int mean = prev_sum[c] / window_size; int diff = mean - scan_in_line[c]; int edge = CLAMP2BYTE(diff); int masked_edge = (edge * scan_in_line[c] + (256 - edge) * mean) >> 8; int var = (prev_sum_pow[c] - mean * prev_sum[c]) / window_size; int out = masked_edge - diff * var / (var + smooth_table[scan_in_line[c]]); scan_out_line[c] = CLAMP2BYTE(out); } scan_in_line += channels, scan_out_line += channels; } } free(col_pow); free(col_val); free(row_pos); free(col_pos); } /* clang-format off */ void denoise2( uint8_t *out, uint8_t **planes, int *smooth_table, int width, int height, int channels, int ch_idx, int radius ){ uint8_t *in = planes[ch_idx]; assert(in && out); assert(radius > 0); int window_size = (2*radius + 1) * (2*radius + 1); int *col_pow = calloc(width * sizeof(int), 1); int *col_val = calloc(width * sizeof(int), 1); int *row_pos = malloc((width + 2*radius) * sizeof(int)); int *col_pos = malloc((height + 2*radius) * sizeof(int)); int stride = width; compute_offset(row_pos, width, radius, radius, 1); compute_offset(col_pos, height, radius, radius, stride); int *row_off = row_pos + radius; int *col_off = col_pos + radius; for (int x = 0; x < stride; x ++) { for (int z = -radius; z <= radius; z++) { uint8_t sample = *(in + col_off[z] + x); col_val[x] += sample; col_pow[x] += sample * sample; } } for (int y = 0; y < height; y++) { uint8_t *scan_in_line = in + y*stride; uint8_t *scan_out_line = out + y*stride*channels; if (y > 0) { uint8_t *last_col = in + col_off[y - radius - 1]; uint8_t *next_col = in + col_off[y + radius]; for (int x = 0; x < stride; x++) { col_val[x] -= last_col[x] - next_col[x]; col_pow[x] -= last_col[x]*last_col[x] - next_col[x]*next_col[x]; } } int prev_sum = 0, prev_sum_pow = 0; for (int z = -radius; z <= radius; z++) { int index = row_off[z]; prev_sum += col_val[index]; prev_sum_pow += col_pow[index]; } for (int x = 0; x < width; x++) { int last_row = row_off[x - radius - 1]; int next_row = row_off[x + radius]; if(x > 0){ prev_sum -= col_val[last_row] - col_val[next_row]; prev_sum_pow = prev_sum_pow - col_pow[last_row] + col_pow[next_row]; } int pix = *scan_in_line; int mean = prev_sum / window_size; int diff = mean - pix; int edge = CLAMP2BYTE(diff); int masked_edge = (edge*pix + (256 - edge)*mean) >> 8; int var = (prev_sum_pow - mean*prev_sum) / window_size; int out = masked_edge - diff*var / (var + smooth_table[pix]); scan_out_line[ch_idx] = 
CLAMP2BYTE(out); scan_in_line++, scan_out_line += channels; } } free(col_pow); free(col_val); free(row_pos); free(col_pos); } static void die(char *msg) { fprintf(stderr, "Fatal: %s\n", msg); exit(-1); } inline uint64_t time_diff(struct timeval *st, struct timeval *et) { return (et->tv_sec - st->tv_sec)*1000000ULL + (et->tv_usec - st->tv_usec); } /* clang-format on */ int main(int argc, char *argv[]) { if (argc < 2) { printf("%s -i INPUT [-o OUTPUT] [-l LEVEL]\n", argv[0]); return -1; } char *ifn = NULL; char *ofn = "out.jpg"; int smoothing_level = 10; /* clang-format off */ int opt; while ((opt = getopt (argc, argv, "i:l:o:")) != -1){ switch(opt){ case 'i': { ifn = optarg; break; } case 'l': { smoothing_level = atoi(optarg); smoothing_level = (smoothing_level < 1 ? 1 : (smoothing_level > 20 ? 20 : smoothing_level)); break; } case 'o': { ofn = optarg; break; } } } printf("ifn:%s ofn:%s level:%d\n", ifn, ofn, smoothing_level); /* clang-format on */ struct timeval stime, etime; int width = 0, height = 0, channels = 0; uint8_t *in = stbi_load(ifn, &width, &height, &channels, 0); if (!in) die("Fail to load input file"); assert(width > 0 && height > 0); assert(channels >= 3); int dimension = width * height; uint8_t *out = malloc(dimension * channels); if (!out) die("Out of memory"); uint8_t *in_planes[4] = {NULL}; for (int i = 0; i < channels; i++) in_planes[i] = malloc(dimension); /* Separation between skin and non-skin pixels */ gettimeofday(&stime, NULL); float rate = detect(in, in_planes, width, height, channels) / (float) dimension * 100; gettimeofday(&etime, NULL); printf("detect - %lu us\n", time_diff(&stime, &etime)); /* Perform edge detection, resulting in an edge map for further denoise */ /* clang-format off */ gettimeofday(&stime, NULL); int smooth_table[256] = {0}; float ii = 0.f; for (int i = 0; i <= 255; i++, ii -= 1.) { smooth_table[i] = ( expf(ii * (1.0f / (smoothing_level * 255.0f))) + (smoothing_level * (i + 1)) + 1 ) / 2; smooth_table[i] = max(smooth_table[i], 1); } #if OPT_PLANE #pragma omp parallel for for(int i = 0; i < channels; i++) denoise2(out, in_planes, smooth_table, width, height, channels, i, min(width, height)/rate + 1); #else denoise(out, in, smooth_table, width, height, channels, min(width, height) / rate + 1); #endif gettimeofday(&etime, NULL); printf("denoise - %lu us\n", time_diff(&stime, &etime)); /* clang-format on */ if (!stbi_write_jpg(ofn, width, height, channels, out, 100)) die("Fail to generate"); for (int i = 0; i < channels; i++) free(in_planes[i]); free(out); free(in); return 0; }
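/*
 * The denoise routines above get their speed from running sums (and
 * sums of squares), so each (2r+1)x(2r+1) window is updated in O(1)
 * rather than rescanned. This 1-D sketch isolates that idea for a
 * simple box blur; it clamps at the borders, whereas compute_offset()
 * above mirrors them. box_blur_1d is an illustrative name, not part of
 * the file above.
 */
#include <stdint.h>

static void box_blur_1d(const uint8_t *in, uint8_t *out, int n, int r)
{
    int w = 2 * r + 1;
    int sum = 0;
    /* initial window around x = 0, indices clamped to [0, n-1] */
    for (int k = -r; k <= r; k++)
        sum += in[k < 0 ? 0 : (k >= n ? n - 1 : k)];
    for (int x = 0; x < n; x++) {
        out[x] = (uint8_t) (sum / w);
        /* slide right: add the entering sample, drop the leaving one */
        int enter = x + r + 1, leave = x - r;
        sum += in[enter >= n ? n - 1 : enter];
        sum -= in[leave < 0 ? 0 : leave];
    }
}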
optQCCFaster.c
#include <mex.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <string.h> /* q_c.singleton = optQCMFC(condQB,prediction,Sigma_c,mu_c,c_c,mu_a_b,numColumnsPred,numColumnsShape,columnsPredShapeVec,columnsPredShapeFactorVec); * */ int min(int A, int B) { if (A < B) { return A; } else { return B; } } int max(int A, int B) { if (A > B) { return A; } else { return B; } } float expf_fast(float a) { union { float f; int x; } u; u.x = (int) (12102203 * a + 1064866805); return u.f; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* Input variables */ double *condQB = mxGetPr(prhs[0]); double *prediction = mxGetPr(prhs[1]); double *mu_a_b = mxGetPr(prhs[2]); int numColumnsPred = (int) mxGetScalar(prhs[3]); int numColumnsShape = (int) mxGetScalar(prhs[4]); int *colA = (int*) mxGetData(prhs[5]); double *colAFac = mxGetPr(prhs[6]); int *colB = (int*) mxGetData(prhs[7]); double *colBFac = mxGetPr(prhs[8]); double *factorsPrec = mxGetPr(prhs[9]); double *hashTable = mxGetPr(prhs[10]); int *cmin = (int*) mxGetData(prhs[11]); int *cmax = (int*) mxGetData(prhs[12]); /* intern variables and pointers */ double* q_c = NULL; double* boundaries = NULL; int i,j,k,i1,i2; int numRows = mxGetM(prhs[0]); int numBounds = mxGetN(prhs[0])/numColumnsPred; int alphaSize = numRows*numBounds*sizeof(double); double* alpha = malloc(alphaSize); double* beta = malloc(alphaSize); double* c = malloc(numBounds*sizeof(double)); double alphaTotal,q_c_total,tmpA,tmpB,valA,valB,factorA,factorB,cInv; double* preCalcA = malloc(numRows*sizeof(double)); double* preCalcB = malloc(numRows*sizeof(double)); int idxQC,idxPred,idx,idxA,idxB,idxC,idxANumRows,idxBNumRows,idxCond,idxBounds; int* A = malloc(numBounds*sizeof(int)); int* B = malloc(numBounds*sizeof(int)); /* determines when to use the hash table */ /*int limit = 10;*/ int limit2 = -30; /* int counter = 0;*/ /* switch from matlab indexing to C indexing */ for (i=0; i < numColumnsPred; i++) { colA[i] = colA[i]-1; colB[i] = colB[i]-1; } plhs[0] = mxCreateDoubleMatrix(1,numRows*numBounds*numColumnsPred,mxREAL); q_c = mxGetPr(plhs[0]); plhs[1] = mxCreateDoubleMatrix(numBounds,numColumnsPred,mxREAL); boundaries = mxGetPr(plhs[1]); /* ****** start sum-product ******** */ for (j=0; j < numColumnsPred; j++) { /* for (j=0; j < 1; j++) { */ memset(alpha, 0, alphaSize); memset(beta, 0, alphaSize); idxPred = j; /* calculate limits of for-loops corresponding to transition matrices */ for (k=0; k < numBounds; k++) { A[k] = cmin[idxPred + k*numColumnsPred]; B[k] = cmax[idxPred + k*numColumnsPred]; /*printf("%d, %d: %d, %d\n",j,k,A[k],B[k]);*/ } alphaTotal = 0; /* shape index for condQB */ idxA = colA[idxPred]*numRows; idxB = colB[idxPred]*numRows; /* pred index for prediction */ idxC = idxPred*numRows*numBounds; for (i = A[0]; i <= B[0]; i++) { alpha[i] = condQB[j*numRows + i]*prediction[idxC + i]; alphaTotal += alpha[i]; } c[0] = alphaTotal; alphaTotal = 1/alphaTotal; /* normalize alpha */ for (i=A[0]; i <= B[0]; i++) { alpha[i] *= alphaTotal; } /* make forward message passing over all boundaries */ /* for boundaries 2 to numBounds */ for (k=1; k < numBounds; k++) { /* for(k=1; k < 0; k++) { */ /* preCalc index for inner loop */ idxA = colA[idxPred] + (k-1)*numColumnsShape; idxB = colB[idxPred] + (k-1)*numColumnsShape; factorA = -0.5*factorsPrec[idxA]; factorB = -0.5*factorsPrec[idxB]; idxANumRows = idxA*numRows; idxBNumRows = idxB*numRows; idxCond = k*numColumnsPred*numRows + j*numRows; idx = numRows*k; alphaTotal = 0; /* iterates over the columns 
of each transition matrix; corresponds to idxNonZeroA in matlab; determines the non-zero entries of the current alpha */ #pragma omp parallel for private(tmpA,tmpB,valA,valB,i2) reduction(+:alphaTotal) for (i1 = A[k]; i1 <= B[k]; i1++) { tmpA = tmpB = 0; /* iterates over the rows of transition matrices; corresponds to idxNonZeroB in matlab */ /* upper triangular matrix --> ordering constraint on boundaries */ for (i2 = A[k-1]; i2 <= min(i1,B[k-1]); i2++) { valA = (i1 + 1 - mu_a_b[idxANumRows + i2]); valA = valA*valA*factorA; valB = (i1 + 1 - mu_a_b[idxBNumRows + i2]); valB = valB*valB*factorB; if (valA > limit2) {tmpA += alpha[idx - numRows + i2]*hashTable[(int)(-valA*1000 + 0.5)];} if (valB > limit2) {tmpB += alpha[idx - numRows + i2]*hashTable[(int)(-valB*1000 + 0.5)];} } alpha[idx + i1] = prediction[idxC + idx + i1]*condQB[idxCond+i1]*(colAFac[idxPred]*tmpA + colBFac[idxPred]*tmpB); alphaTotal += alpha[idx + i1]; } c[k] = alphaTotal; alphaTotal = 1/alphaTotal; /* normalize alpha */ for (i = A[k]; i <= B[k]; i++) { alpha[idx + i] *= alphaTotal; } } /* end for over bounds k */ /* init beta for the last node */ idxQC = j*numBounds*numRows; idxBounds = (j+1)*numBounds - 1; boundaries[idxBounds] = 0; for (i=(numBounds-1)*numRows;i<numRows*numBounds;i++) { beta[i] = 1; q_c[idxQC + i] = alpha[i]; boundaries[idxBounds] += alpha[i]*((i+1)-(numBounds-1)*numRows); } /* message backward */ for (k=numBounds-2; k >= 0; k--) { /* for (k = 0; k < 0; k++) {*/ idxCond = j*numRows + (k+1)*numColumnsPred*numRows; idxB = numRows*(k+1); idxA = idxPred*numRows*numBounds + (k+1)*numRows; /* precalculate entries for inner loop over z_{n+1}, that are independent of z_n */ for (i=A[k+1]; i <= B[k+1]; i++) { preCalcA[i] = beta[idxB + i]*prediction[idxA + i]*condQB[idxCond + i]; /*preCalcB[i] = beta[idxB + i]*prediction[idxA + i]*condQB[idxCondA + i];*/ } /* preCalc idx for inner loop */ idxA = colA[idxPred] + k*numColumnsShape; idxB = colB[idxPred] + k*numColumnsShape; factorA = -0.5*factorsPrec[idxA]; factorB = -0.5*factorsPrec[idxB]; idxANumRows = idxA*numRows; idxBNumRows = idxB*numRows; idx = numRows*k; /* the outer loop (over z_n) is constrained by alpha (and therefor condQB), the inner loop over (z_{n+1}) by condQB */ q_c_total = 0; cInv = 1/c[k+1]; #pragma omp parallel for private(tmpA,tmpB,valA,valB,i2) reduction(+:q_c_total) for (i1 = A[k]; i1 <= B[k]; i1++) { tmpA = tmpB = 0; /* idxFinal */ for (i2 = max(A[k+1],i1); i2 <= B[k+1]; i2++) { valA = factorA*(i2 + 1 - mu_a_b[idxANumRows + i1])*(i2 + 1 - mu_a_b[idxANumRows + i1]); valB = factorB*(i2 + 1 - mu_a_b[idxBNumRows + i1])*(i2 + 1 - mu_a_b[idxBNumRows + i1]); if (valA > limit2) {tmpA += preCalcA[i2]*hashTable[(int)(-valA*1000 + 0.5)];} if (valB > limit2) {tmpB += preCalcA[i2]*hashTable[(int)(-valB*1000 + 0.5)];} } beta[idx + i1] = (colAFac[idxPred]*tmpA + colBFac[idxPred]*tmpB)*cInv; q_c[idxQC + idx + i1] = alpha[idx + i1]*beta[idx + i1]; q_c_total += q_c[idxQC + idx + i1]; } idxBounds = j*numBounds + k; boundaries[idxBounds] = 0; /* convert to inverse */ q_c_total = 1/q_c_total; /* normalize q_c distribution */ for (i1 = A[k]; i1 <= B[k]; i1++) { q_c[idxQC + idx + i1] *= q_c_total; boundaries[idxBounds] += q_c[idxQC + idx + i1]*(i1+1); } } } free(alpha); free(beta); free(c); free(preCalcA); free(preCalcB); free(A); free(B); }
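/*
 * The MEX routine above is a scaled forward-backward (sum-product) pass
 * with banded Gaussian transition factors and a hash-table expf. This
 * sketch shows the same alpha/beta recursions on a dense S-state chain
 * of length N: alpha is renormalized each step and the scale factors
 * c[n] are reused to rescale beta, exactly the role c[] plays above.
 * All names here are illustrative, and a uniform prior is assumed.
 */
static void forward_backward(const double *T,   /* S*S transition matrix */
                             const double *obs, /* N*S observation terms */
                             int S, int N,
                             double *alpha, double *beta, double *c)
{
    for (int n = 0; n < N; n++) {
        c[n] = 0.0;
        for (int j = 0; j < S; j++) {
            double a;
            if (n == 0)
                a = 1.0;                     /* uniform prior */
            else {
                a = 0.0;
                for (int i = 0; i < S; i++)
                    a += alpha[(n - 1) * S + i] * T[i * S + j];
            }
            a *= obs[n * S + j];
            alpha[n * S + j] = a;
            c[n] += a;
        }
        for (int j = 0; j < S; j++)
            alpha[n * S + j] /= c[n];        /* like alphaTotal above */
    }
    for (int j = 0; j < S; j++)
        beta[(N - 1) * S + j] = 1.0;         /* init beta for last node */
    for (int n = N - 2; n >= 0; n--)
        for (int i = 0; i < S; i++) {
            double b = 0.0;
            for (int j = 0; j < S; j++)
                b += T[i * S + j] * obs[(n + 1) * S + j]
                                  * beta[(n + 1) * S + j];
            beta[n * S + i] = b / c[n + 1];  /* rescale with stored c */
        }
}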
opencl_7z_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for 7z format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_sevenzip; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_sevenzip); #else #include <string.h> #include <openssl/aes.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "misc.h" #include "common-opencl.h" #include "options.h" #include "crc32.h" #include "stdint.h" #include "unicode.h" #include "memdbg.h" #define FORMAT_LABEL "7z-opencl" #define FORMAT_NAME "7-Zip" #define FORMAT_TAG "$7z$" #define TAG_LENGTH 4 #define ALGORITHM_NAME "SHA256 OPENCL AES" #define BENCHMARK_COMMENT " (512K iterations)" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define PLAINTEXT_LENGTH ((55-8)/2) #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define BIG_ENOUGH (8192 * 32) typedef struct { uint32_t length; uint16_t v[PLAINTEXT_LENGTH]; } sevenzip_password; typedef struct { uint8_t key[32]; } sevenzip_hash; typedef struct { uint32_t length; uint32_t iterations; uint8_t salt[16]; } sevenzip_salt; typedef struct { cl_uint total[2]; cl_uint state[8]; cl_uchar buffer[64]; } SHA256_CTX; typedef struct { cl_ulong t; SHA256_CTX ctx; cl_uint len; cl_ushort buffer[PLAINTEXT_LENGTH]; } sevenzip_state; static int *cracked; static int any_cracked; static struct custom_salt { int NumCyclesPower; int SaltSize; int ivSize; int type; unsigned char data[BIG_ENOUGH]; unsigned char iv[16]; unsigned char salt[16]; unsigned int crc; int length; /* used in decryption */ int unpacksize; /* used in CRC calculation */ } *cur_salt; static struct fmt_tests sevenzip_tests[] = { /* CRC checks passes for these hashes */ {"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"}, {"$7z$0$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"}, /* padding check passes for these hashes */ {"$7z$0$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f", "password"}, /* not supported hashes, will require validFolder check */ // 
{"$7z$0$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda", "password"}, {NULL} }; static sevenzip_password *inbuffer; static sevenzip_hash *outbuffer; static sevenzip_salt currentsalt; static cl_mem mem_in, mem_out, mem_state, mem_salt; static cl_kernel sevenzip_init; #define insize (sizeof(sevenzip_password) * global_work_size) #define outsize (sizeof(sevenzip_hash) * global_work_size) #define statesize (sizeof(sevenzip_state) * global_work_size) #define saltsize (sizeof(sevenzip_salt)) #define cracked_size (sizeof(*cracked) * global_work_size) #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define OCL_CONFIG "7z" #define HASH_LOOPS 4096 #define LOOP_COUNT ((1 << currentsalt.iterations) + HASH_LOOPS - 1) / HASH_LOOPS #define STEP 0 #define SEED 16 static int split_events[] = { 2, -1, -1 }; static const char *warn[] = { "xfer: " , ", init: ", ", crypt: ", ", xfer: " }; // This file contains auto-tuning routine(s). It has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { size_t s; s = autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_init); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel)); return s; } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 8 : 1; else return 64; } static void create_clobj(size_t global_work_size, struct fmt_main *self) { cl_int cl_error; inbuffer = (sevenzip_password*) mem_calloc(insize); outbuffer = (sevenzip_hash*) mem_alloc(outsize); cracked = mem_calloc(cracked_size); // Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, saltsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem salt"); mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, statesize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem state"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument"); HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); } static void release_clobj(void) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt"); HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state"); 
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(sevenzip_init), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); } static int crypt_all(int *pcount, struct db_salt *salt); static int crypt_all_benchmark(int *pcount, struct db_salt *salt); static void init(struct fmt_main *self) { CRC32_t crc; char build_opts[64]; cl_int cl_error; CRC32_Init(&crc); snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%d -DHASH_LOOPS=%d", PLAINTEXT_LENGTH, HASH_LOOPS); opencl_init("$JOHN/kernels/7z_kernel.cl", gpu_id, build_opts); sevenzip_init = clCreateKernel(program[gpu_id], "sevenzip_init", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); crypt_kernel = clCreateKernel(program[gpu_id], "sevenzip_crypt", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, sizeof(sevenzip_salt), 0); // Auto tune execution from shared/included code. self->methods.crypt_all = crypt_all_benchmark; autotune_run(self, 1 << 19, 0, 15000000000ULL); self->methods.crypt_all = crypt_all; if (pers_opts.target_enc == UTF_8) self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int isdecu(char *q) { char buf[24]; unsigned int x = atou(q); sprintf(buf, "%u", x); return !strcmp(q,buf); } static int isdec(char *q) { char buf[24]; int x = atoi(q); sprintf(buf, "%d", x); return !strcmp(q,buf); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int len, type, NumCyclesPower; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtok(ctcopy, "$")) == NULL) goto err; if (strlen(p) > 1) goto err; type = atoi(p); if (type != 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* NumCyclesPower */ goto err; if (strlen(p) > 2) goto err; NumCyclesPower = atoi(p); if (NumCyclesPower > 24 || NumCyclesPower < 1) goto err; if ((p = strtok(NULL, "$")) == NULL) /* salt length */ goto err; len = atoi(p); if(len > 16 || len < 0) /* salt length */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* salt */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv length */ goto err; if (strlen(p) > 2) goto err; len = atoi(p); if(len < 0 || len > 16) /* iv length */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv */ goto err; if (!ishex(p)) goto err; if (strcmp(p+len*2, "0000000000000000")) goto err; if ((p = strtok(NULL, "$")) == NULL) /* crc */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* data length */ goto err; len = atoi(p); if ((p = strtok(NULL, "$")) == NULL) /* unpacksize */ goto err; if (!isdec(p)) /* no way to validate, other than atoi() works for it */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* data */ goto err; if (strlen(p) != len * 2) /* validates data_len atoi() */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static union { struct custom_salt _cs; ARCH_WORD_32 dummy; } un; struct custom_salt *cs = &(un._cs); 
ctcopy += 4; p = strtok(ctcopy, "$"); cs->type = atoi(p); p = strtok(NULL, "$"); cs->NumCyclesPower = atoi(p); p = strtok(NULL, "$"); cs->SaltSize = atoi(p); p = strtok(NULL, "$"); /* salt */ p = strtok(NULL, "$"); cs->ivSize = atoi(p); p = strtok(NULL, "$"); /* iv */ for (i = 0; i < cs->ivSize; i++) cs->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); /* crc */ cs->crc = atou(p); p = strtok(NULL, "$"); cs->length = atoi(p); p = strtok(NULL, "$"); cs->unpacksize = atoi(p); p = strtok(NULL, "$"); /* crc */ for (i = 0; i < cs->length; i++) cs->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->SaltSize); currentsalt.length = cur_salt->SaltSize; currentsalt.iterations = cur_salt->NumCyclesPower; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, saltsize, &currentsalt, 0, NULL, NULL), "Transfer salt to gpu"); } static void clear_keys(void) { memset(inbuffer, 0, insize); } static void sevenzip_set_key(char *key, int index) { UTF16 c_key[PLAINTEXT_LENGTH + 1]; int length = strlen(key); /* Convert password to utf-16-le format (--encoding aware) */ length = enc_to_utf16(c_key, PLAINTEXT_LENGTH, (UTF8*)key, length); if (length <= 0) length = strlen16(c_key); inbuffer[index].length = length; memcpy(inbuffer[index].v, c_key, 2 * length); } static char *get_key(int index) { UTF16 c_key[PLAINTEXT_LENGTH + 1]; int length = inbuffer[index].length; memcpy(c_key, inbuffer[index].v, 2 * length); c_key[length] = 0; return (char*)utf16_to_enc(c_key); } // XXX port Python code to C *OR* use code from LZMA SDK static int validFolder(unsigned char *data) { // int numcoders = self._read64Bit(file) return 0; } static int sevenzip_decrypt(unsigned char *derived_key, unsigned char *data) { unsigned char out[cur_salt->length]; AES_KEY akey; unsigned char iv[16]; union { unsigned char crcc[4]; unsigned int crci; } _crc_out; unsigned char *crc_out = _crc_out.crcc; unsigned int ccrc; CRC32_t crc; int i; int nbytes, margin; memcpy(iv, cur_salt->iv, 16); if(AES_set_decrypt_key(derived_key, 256, &akey) < 0) { fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n"); } AES_cbc_encrypt(cur_salt->data, out, cur_salt->length, &akey, iv, AES_DECRYPT); /* various verifications tests */ // test 0, padding check, bad hack :-( margin = nbytes = cur_salt->length - cur_salt->unpacksize; i = cur_salt->length - 1; while (nbytes > 0) { if (out[i] != 0) return -1; nbytes--; i--; } if (margin > 7) { // printf("valid padding test ;-)\n"); // print_hex(out, cur_salt->length); return 0; } // test 1, CRC test CRC32_Init(&crc); CRC32_Update(&crc, out, cur_salt->unpacksize); CRC32_Final(crc_out, crc); ccrc = _crc_out.crci; // computed CRC if (ccrc == cur_salt->crc) return 0; // XXX don't be too eager! // XXX test 2, "well-formed folder" test if (validFolder(out)) { printf("validFolder check ;-)\n"); return 0; } return -1; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int i, index; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = local_work_size ? 
(count + local_work_size - 1) / local_work_size * local_work_size : count; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } // Copy data to gpu HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, NULL), "Copy data to gpu"); // Run 1st kernel HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run init kernel"); // Run loop kernel for (i = 0; i < LOOP_COUNT; i++) { HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } // Read the result back HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, NULL), "Copy result back"); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { /* decrypt and check */ if(sevenzip_decrypt(outbuffer[index].key, cur_salt->data) == 0) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } return count; } static int crypt_all_benchmark(int *pcount, struct db_salt *salt) { int count = *pcount; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = local_work_size ? (count + local_work_size - 1) / local_work_size * local_work_size : count; // Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); // Run 1st kernels BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run init kernel"); // Warm-up run BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel"); // Loop kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel"); // Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[3]), "Copy result back"); BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)(1 << my_salt->NumCyclesPower); } #endif struct fmt_main fmt_opencl_sevenzip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT | FMT_UNICODE | FMT_UTF8, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif sevenzip_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, sevenzip_set_key, get_key, clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
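/*
 * For reference, a standalone CPU sketch of the key derivation that the
 * sevenzip_init/sevenzip_crypt kernels compute in HASH_LOOPS-sized
 * chunks: 7-Zip feeds salt || UTF-16LE password || 64-bit round counter
 * (little-endian, per the 7-Zip SDK) into one continuous SHA-256
 * stream, 2^NumCyclesPower times, and the final digest is the 256-bit
 * AES key. It uses OpenSSL's SHA-256, which this plugin already links
 * for AES; note this SHA256_CTX is OpenSSL's type, not the GPU-side
 * struct of the same name above, and sevenzip_kdf_cpu is an
 * illustrative name.
 */
#include <openssl/sha.h>
#include <stddef.h>
#include <stdint.h>

static void sevenzip_kdf_cpu(const unsigned char *salt, size_t salt_len,
                             const unsigned char *pw_utf16le, size_t pw_len,
                             int NumCyclesPower, unsigned char key[32])
{
    SHA256_CTX ctx;
    uint64_t rounds = 1ULL << NumCyclesPower;
    SHA256_Init(&ctx);
    for (uint64_t i = 0; i < rounds; i++) {
        unsigned char ctr[8];
        for (int b = 0; b < 8; b++)
            ctr[b] = (unsigned char) (i >> (8 * b)); /* LE counter bytes */
        SHA256_Update(&ctx, salt, salt_len);
        SHA256_Update(&ctx, pw_utf16le, pw_len);
        SHA256_Update(&ctx, ctr, 8);
    }
    SHA256_Final(key, &ctx);    /* 32-byte AES-256 key */
}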
distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd safelen(4) void test_no_clause() { int i; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}} #pragma omp distribute simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match 
this '('}} #pragma omp distribute simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} 
#pragma omp distribute simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp 
target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as reduction}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // 
expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp target #pragma omp teams for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp distribute simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp distribute simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp 
distribute simd private(x) linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp distribute simd linear(x) private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp distribute simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp distribute simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp distribute simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams 
#pragma omp distribute simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma 
omp distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction( : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(+ for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+: for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams #pragma omp distribute simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int f; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(f) for (int k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(val(f)) for (int k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(uval(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(ref(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(foo(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; }
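/* Editor's note (hedged): every case above exercises a malformed clause or
 * nesting. For contrast, a minimal well-formed 'distribute simd' region, with
 * the 'target'/'teams' enclosing constructs the tests assume, would be
 * (illustrative only, not part of the original test): */
void test_well_formed(float *a, float *b, int n) {
#pragma omp target map(tofrom : a[0:n]) map(to : b[0:n])
#pragma omp teams
#pragma omp distribute simd simdlen(4)
  for (int i = 0; i < n; ++i)
    a[i] += b[i];
}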
example_04-ArrayOfStructs-Naive-Omp-SIMD-Tiled.c
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * example_04-ArrayOfStructs-Naive-Omp-SIMD-Tiled.c :
 *    Example of SPH Density Calculation using a
 *    naive implementation of the main density loop,
 *    no neighbour search, and Array of Structs (AoS)
 *    data layout, OpenMP parallelization and SIMD
 *    directives on the kernel and density calculation.
 *    This incorporates strip mining and exchange to
 *    implement cache blocking and preserve performance
 *    for large numbers of particles that would otherwise
 *    be lost.
 *
 * (C) Copyright 2021 José Hugo Elsas
 * Author: José Hugo Elsas <jhelsas@gmail.com>
 *
 * Command Line Options:
 *    -runs <int>    : Set the number of repetitions (runs) for
 *                     calculating the density. The value of
 *                     the density is based on the last
 *                     iteration.
 *                     Default value: 1
 *    -run_seed <int>: Flag to set an alternative seed for
 *                     the PRNG. Instead of feeding seed
 *                     to the PRNG directly, it feeds
 *                     seed + iteration, so as to generate different
 *                     configurations for each iteration.
 *                     Default value: 0 - (possible 0/1)
 *    -seed <int>    : Set the seed to use for the SPH particles
 *                     uniform position generation in the box
 *                     Default value: 123123123
 *
 *    -N <int>       : Set the number of SPH particles to be used
 *                     Default value: 1e5 = 100,000
 *    -h <float>     : Set the value of the smoothing kernel
 *                     parameter h, which corresponds to half
 *                     of the support of the kernel.
 *                     Default value: 0.05
 *
 *    -Nx <int>      : Set the number of Cells in the X direction
 *                     Default value: 10
 *    -Ny <int>      : Set the number of Cells in the Y direction
 *                     Default value: 10
 *    -Nz <int>      : Set the number of Cells in the Z direction
 *                     Default value: 10
 *
 *    -Xmin <float>  : Set the lower bound in the X direction for
 *                     the Cell Linked List box
 *                     Default value: 0.0
 *    -Ymin <float>  : Set the lower bound in the Y direction for
 *                     the Cell Linked List box
 *                     Default value: 0.0
 *    -Zmin <float>  : Set the lower bound in the Z direction for
 *                     the Cell Linked List box
 *                     Default value: 0.0
 *
 *    -Xmax <float>  : Set the upper bound in the X direction for
 *                     the Cell Linked List box
 *                     Default value: 1.0
 *    -Ymax <float>  : Set the upper bound in the Y direction for
 *                     the Cell Linked List box
 *                     Default value: 1.0
 *    -Zmax <float>  : Set the upper bound in the Z direction for
 *                     the Cell Linked List box
 *                     Default value: 1.0
 */

#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <limits.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/time.h>
#include <inttypes.h>

#include <omp.h>

#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_heapsort.h>

#include "sph_data_types.h"
#include "sph_linked_list.h"
#include "sph_utils.h"

#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif

#define COMPUTE_BLOCKS 1

int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
              linkedListBox *box, SPHparticle *lsph, double *times);

int compute_density_3d_naive_omp_simd_tiled(int N,double h,SPHparticle *lsph);

double w_bspline_3d_constant(double h);

#pragma omp declare simd
double w_bspline_3d_simd(double q);

int main(int argc, char **argv){
  bool run_seed = false;     // By default the behavior is to use the same seed
  int err, runs = 1;         // By default the main loop only runs once
  long int seed = 123123123; // The default seed is 123123123
  int64_t N = 100000;        // The default number of particles is N = 1e5 = 100,000
  double h=0.05;             // The default kernel smoothing length is h = 0.05
  linkedListBox *box;        // Uninitialized Box containing the cells for the cell linked list method
  SPHparticle *lsph;         // Uninitialized array of SPH particles

  box = (linkedListBox*)malloc(1*sizeof(linkedListBox)); // Create a box representing the entire 3d domain

  // allow for command line customization of the run
  arg_parse(argc,argv,&N,&h,&seed,&runs,&run_seed,box);  // Parse the command line options and override default values

  lsph = (SPHparticle*)malloc(N*sizeof(SPHparticle));    // Create an array of N particles

  double times[runs*COMPUTE_BLOCKS];
  for(int run=0;run<runs;run+=1)
    main_loop(run,run_seed,N,h,seed,box,lsph,times);

  bool is_cll = false;
  const char *prefix = "ex04,naive,AoS,omp,simd,tiled";
  print_time_stats(prefix,is_cll,N,h,seed,runs,lsph,box,times);
  print_sph_particles_density(prefix,is_cll,N,h,seed,runs,lsph,box);

  free(lsph);
  safe_free_box(box);

  return 0;
}

/*
 * Function main_loop:
 *    Runs the main loop of the program, including the particle array generation,
 *    density calculation and the timings annotations.
 *
 * Arguments:
 *    run <int>           : index (or value) of the present iteration
 *    run_seed <bool>     : boolean defining whether to use run index for seed or not
 *    N <int>             : Number of SPH particles to be used in the run
 *    h <double>          : Smoothing Length for the Smoothing Kernel w_bspline
 *    seed <long int>     : seed for GSL PRNG generator to generate particle positions
 *    box <linkedListBox> : Box of linked list cells, encapsulating the 3d domain
 *    lsph <SPHparticle>  : Array (pointer) of SPH particles to be updated
 *    times <double>      : Array to store the computation timings to be updated
 * Returns:
 *    0                   : error code returned
 *    lsph <SPHparticle>  : SPH particle array is updated in the rho field by reference
 *    times <double>      : Times is updated by reference
 */
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
              linkedListBox *box, SPHparticle *lsph, double *times)
{
  int err;

  if(run_seed)
    err = gen_unif_rdn_pos_box(N,seed+run,box,lsph);
  else
    err = gen_unif_rdn_pos_box(N,seed,box,lsph);

  if(err)
    fprintf(stderr,"error in gen_unif_rdn_pos\n");

  // ------------------------------------------------------ //

  double t0,t1;

  t0 = omp_get_wtime();

  compute_density_3d_naive_omp_simd_tiled(N,h,lsph); // Compute the density for all particles

  t1 = omp_get_wtime();

  // ------------------------------------------------------ //

  times[COMPUTE_BLOCKS*run+0] = t1-t0;               // Only one component to measure time

  return 0;
}

/*
 * Function compute_density_3d_naive_omp_simd_tiled:
 *    Computes the SPH density from the particles implementing a strip mine and exchange
 *    strategy to re-use data in cache over the direct loop. It executes calculations
 *    in parallel for the outer-most loop using openMP and SIMD in the inner-most loop,
 *    though SIMD with only limited success.
 *
 * Reference: https://en.wikipedia.org/wiki/Loop_nest_optimization
 *
 * Arguments:
 *    N <int>            : Number of SPH particles to be used in the run
 *    h <double>         : Smoothing Length for the Smoothing Kernel w_bspline
 *    lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
 * Returns:
 *    0                  : error code returned
 *    lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
 */
int compute_density_3d_naive_omp_simd_tiled(int N,double h,SPHparticle *lsph){
  const double inv_h = 1./h;                               // Pre-invert the smoothing distance
  const double kernel_constant = w_bspline_3d_constant(h); // Pre-compute the 3d normalization constant
  const int64_t STRIP = 500;                               // Setting the size of the strip or block

  #pragma omp parallel for                                 // Run the iteration in parallel
  for(int64_t ii=0;ii<N;ii+=1)                             // Iterate
    lsph[ii].rho = 0.;                                     // Pre-initialize the density to zero

  #pragma omp parallel for                                 // Run the iteration in i in parallel
  for(int64_t i=0;i<N;i+=STRIP){                           // Breaking up the i and j iterations in blocks
    for(int64_t j=0;j<N;j+=STRIP){                         // of size STRIP to do data re-use and cache blocking
      for(int64_t ii=i;ii < ((i+STRIP<N)?(i+STRIP):N); ii+=1){ // Iterate a block over ii
        double xii = lsph[ii].r.x;                         // Load the position in X for ii
        double yii = lsph[ii].r.y;                         // Load the position in Y for ii
        double zii = lsph[ii].r.z;                         // Load the position in Z for ii
        double rhoii = 0.0;                                // Initialize the partial density for ii to zero

        #pragma omp simd                                   // Hint at the compiler to vectorize this loop
        for(int64_t jj=j;jj < ((j+STRIP<N)?(j+STRIP):N); jj+=1 ){ // and iterate over the jj part of the block
          double q = 0.;                                   // initialize the distance variable

          double xij = xii-lsph[jj].r.x;                   // Load and subtract jj particle's X position component
          double yij = yii-lsph[jj].r.y;                   // Load and subtract jj particle's Y position component
          double zij = zii-lsph[jj].r.z;                   // Load and subtract jj particle's Z position component

          q += xij*xij;                                    // Add the jj contribution to the ii distance in X
          q += yij*yij;                                    // Add the jj contribution to the ii distance in Y
          q += zij*zij;                                    // Add the jj contribution to the ii distance in Z

          q = sqrt(q)*inv_h;                               // Sqrt and normalize the distance by the smoothing length

          rhoii += lsph[jj].nu*w_bspline_3d_simd(q);       // Add up the contribution from the jj particle
        }                                                  // to the intermediary density and then
        lsph[ii].rho += kernel_constant*rhoii;             // add the intermediary density to the full density
      }
    }
  }

  return 0;
}

/*
 * Function w_bspline_3d_constant:
 *    Returns the 3d normalization constant for the cubic b-spline SPH smoothing kernel
 *
 * Arguments:
 *    h <double> : Smoothing Length for the Smoothing Kernel w_bspline
 * Returns:
 *    3d bspline normalization density <double>
 */
double w_bspline_3d_constant(double h){
  return 3./(2.*M_PI*h*h*h); // 3d normalization value for the b-spline kernel
}

/*
 * Function w_bspline_3d_simd:
 *    Returns the un-normalized value of the cubic b-spline SPH smoothing kernel
 *
 * Arguments:
 *    q <double>  : Distance between particles normalized by the smoothing length h
 * Returns:
 *    wq <double> : Unnormalized value of the kernel
 *
 * Observation:
 *    Why not else if(q<2.)?
 *    Because if you use "else if", the compiler refuses to vectorize.
 *    This results in a large slowdown, about 2.5x for example_04.
 */
#pragma omp declare simd
double w_bspline_3d_simd(double q){                       // Use as input the normalized distance
  double wq = 0.0;
  double wq1 = (0.6666666666666666 - q*q + 0.5*q*q*q);    // The first polynomial of the spline
  double wq2 = 0.16666666666666666*(2.-q)*(2.-q)*(2.-q);  // The second polynomial of the spline

  if(q<2.)      // If the distance is below 2
    wq = wq2;   // Use the 2nd polynomial for the spline

  if(q<1.)      // If the distance is below 1
    wq = wq1;   // Use the 1st polynomial for the spline

  return wq;    // return whichever value corresponds to the distance
}
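/* Editor's illustration (hedged): the observation above, made concrete. This
 * 'else if' variant is semantically identical to w_bspline_3d_simd, but the
 * extra control flow keeps many compilers from emitting masked SIMD selects,
 * which is the roughly 2.5x slowdown mentioned for example_04. Illustrative
 * only, not part of the original example. */
double w_bspline_3d_branchy(double q){
  double wq = 0.0;
  if(q < 1.)                                       // branchy selection instead of the
    wq = (0.6666666666666666 - q*q + 0.5*q*q*q);   // unconditional compute-then-overwrite
  else if(q < 2.)                                  // pattern used in the SIMD-friendly
    wq = 0.16666666666666666*(2.-q)*(2.-q)*(2.-q); // version above
  return wq;
}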
GB_unaryop__abs_int32_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_int16 // op(A') function: GB_tran__abs_int32_int16 // C type: int32_t // A type: int16_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, aij) \ int32_t z = (int32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_int16 ( int32_t *Cx, // Cx and Ax may be aliased int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
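/* Editor's sketch (hedged): with the GB_* macros expanded by hand, the hot
 * loop of GB_unop__abs_int32_int16 reduces to the cast-then-abs loop below.
 * GB_IABS is assumed to be a plain integer absolute value; the function name
 * here is illustrative, not part of GraphBLAS. */
#include <stdint.h>

void unop_abs_int32_int16_expanded(int32_t *Cx, const int16_t *Ax, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;       /* GB_GETA: aij = Ax [pA] */
        int32_t z = (int32_t) aij ;  /* GB_CASTING: typecast int16_t -> int32_t */
        Cx [p] = (z < 0) ? -z : z ;  /* GB_OP: cij = GB_IABS (aij) */
    }
}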
sample_sections_barrier_single_master.c
/* Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/) */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>

#include "openmp_util.h"

int main(int argc, char *argv[])
{
	/* #pragma omp parallel num_threads(3) */
	/* omp_set_num_threads(2); */

	dumpEnviroment();

	printf("Starting...\n\n");

#pragma omp parallel
	{
		DEBUG(printf("Start section a\n");)
		function_a(0);
		DEBUG(printf("End section a\n\n");)

		DEBUG(printf("Start section b (sleep)\n");)
		function_b(1);
		DEBUG(printf("End section b\n\n");)

		/* Explicit barrier */
#pragma omp barrier

		/* Only one thread will run this */
#pragma omp single
		printf("-----------------------------\n");

		DEBUG(printf("Start section c\n");)
		function_c(0);
		DEBUG(printf("End section c\n\n");)

#pragma omp single
		{
			DEBUG(printf("Start section d\n");)
			function_d(0);
			DEBUG(printf("End section d\n\n");)
		}

		/* Only master (0) thread will run this */
#pragma omp master
		{
			printf("Only master thread prints this. Thread Id: [%d]\n", omp_get_thread_num());
			sleep(1);
		}

		/* Implicit barrier here */
	}

	printf("End.\n");

	return(0);
}
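/* Editor's companion sketch (hedged): the sample above leans on the
 * difference between 'single' and 'master'. 'single' is executed by whichever
 * thread reaches it first and ends with an implicit barrier; 'master' is
 * executed only by thread 0 and implies no barrier at all. Illustrative only. */
#include <stdio.h>
#include <omp.h>

void single_vs_master(void)
{
	#pragma omp parallel
	{
		#pragma omp single
		printf("single: ran on thread %d (any one thread)\n", omp_get_thread_num());
		/* all threads synchronize here: single carries an implicit barrier */

		#pragma omp master
		printf("master: always thread %d == 0, no barrier implied\n", omp_get_thread_num());
	}
}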
lake_opt.c
/**************************************
 * Author: Rahul Krishna
 * unity: rkrish11
 **************************************/
/*************************************
 * lake.c
 *
 * Models pebbles on a lake
 * Description:
 *
 * This program uses centered finite differencing to
 * solve the wave equation with sources.
 *
 * The interface is given as
 *
 *     lake [grid_size] [# of pebbles] [end time] [# threads]
 *
 * where
 *
 * grid_size - integer, size of one edge of the square grid;
 *             so the true size of the computational grid will
 *             be grid_size * grid_size
 *
 * # of pebbles - number of simulated "pebbles" to start with
 *
 * end time - the simulation starts from t=0.0 and goes to
 *            t=[end time]
 *
 * # threads - the number of threads the simulation uses
 *
 **************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
// #include "openacc.h"

#include "./lake.h"
#include "./lake_util.h"

/* Probably not necessary but doesn't hurt */
#define _USE_MATH_DEFINES

int main(int argc, char *argv[])
{
  if(argc != 5)
  {
    fprintf(stdout, "Usage: %s npoints npebs time_finish nthreads \n",argv[0]);
    return 1;
  }

  /* grab the arguments and set up some vars */
  int    npoints  = atoi(argv[1]);
  int    npebs    = atoi(argv[2]);
  double end_time = (double)atof(argv[3]);
  int    nthreads = atoi(argv[4]);
  int    narea    = npoints * npoints;

  /* check input params for restrictions */
  if ( npoints % nthreads != 0 )
  {
    fprintf(stderr, "BONK! npoints must be evenly divisible by nthreads\n Try again!");
    return 1;
  }

  /* get the program directory */
  set_wrkdir(argv[0]);

  /* main simulation arrays */
  double *u_i0, *u_i1;
  double *u_cpu, *pebs;

  /* u_err is used when calculating the
   * error between one version of the code
   * and another. */
  double *u_err;

  /* h is the size of each grid cell */
  double h;

  /* used for error analysis */
  double avgerr;

  /* used for time analysis */
  double elapsed_cpu;
  struct timeval cpu_start, cpu_end;

  /* allocate arrays */
  u_i0  = (double*)malloc(sizeof(double) * narea);
  u_i1  = (double*)malloc(sizeof(double) * narea);
  pebs  = (double*)malloc(sizeof(double) * narea);
  u_cpu = (double*)malloc(sizeof(double) * narea);

  start_lake_log("lake.log");

  lake_log("running %s with (%d x %d) grid, until %f, with %d threads\n", argv[0], npoints, npoints, end_time, nthreads);
  printf("running %s with (%d x %d) grid, until %f, with %d threads\n", argv[0], npoints, npoints, end_time, nthreads);

  /* initialize the simulation */
  h = (XMAX - XMIN)/npoints;

  lake_log("grid step size is %f\n",h);

#ifdef __DEBUG
  lake_log("initializing pebbles\n");
#endif

  init_pebbles(pebs, npebs, npoints);

#ifdef __DEBUG
  lake_log("initializing u0, u1\n");
#endif

  init(u_i0, pebs, npoints);
  init(u_i1, pebs, npoints);

  /* print the initial configuration */
#ifdef __DEBUG
  lake_log("printing initial configuration file\n");
#endif

  print_heatmap("lake_i.dat", u_i0, npoints, h);

  /* time, run the simulation */
#ifdef __DEBUG
  lake_log("beginning simulation\n");
#endif

  gettimeofday(&cpu_start, NULL);
  run_sim(u_cpu, u_i0, u_i1, pebs, npoints, h, end_time, nthreads);
  gettimeofday(&cpu_end, NULL);

  elapsed_cpu = ((cpu_end.tv_sec + cpu_end.tv_usec * 1e-6)-(cpu_start.tv_sec + cpu_start.tv_usec * 1e-6));
  lake_log("\nSimulation took %f seconds\n", elapsed_cpu);
  printf("Simulation took %f seconds\n", elapsed_cpu);

  /* print the final configuration */
#ifdef __DEBUG
  lake_log("printing final configuration file\n");
#endif

  print_heatmap("lake_f.dat", u_cpu, npoints, h);

#ifdef __DEBUG
  lake_log("freeing memory\n");
#endif

  /* free memory */
  free(u_i0);
  free(u_i1);
  free(pebs);
  free(u_cpu);

  stop_lake_log();
  return 0;
}

/*****************************
 * run_sim
 *
 * Input
 * ----------
 * double *u0 - the initial configuration
 * double *u1 - the initial + 1 configuration
 * double *pebbles - the array of pebbles
 * int n - the grid size
 * double h - the grid step size
 * double end_time - the final time
 * int nthreads - the number of threads to use
 *
 * Output
 * ----------
 * double *u - the final configuration
 *
 * Description
 * ----------
 * run_sim is the main driver of the program. It takes in the initial
 * configuration and parameters, and runs them until end_time is reached.
 *
 *******************************/
void run_sim(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
  /* Note To Graders: I have the #pragmas sorted. The top pragma is always
     openmp and the bottom is for openACC. Please comment/uncomment as
     necessary. */

  /* arrays used in the calculation */
  double un[n][n], uc[n][n], uo[n][n], pebs[n][n];

  /* time vars */
  double t, dt;
  int i, j;

  /* allocate the calculation arrays */

  /* put the initial configurations into the calculation arrays */
  memcpy(uo, u0, sizeof(double) * n * n);
  memcpy(uc, u1, sizeof(double) * n * n);
  memcpy(pebs, pebbles, sizeof(double) * n * n);

  /* start at t=0.0 */
  t = 0.;

  /* this is probably not ideal. In principle, we should
   * keep the time-step at the size determined by the
   * CFL condition
   *
   *    dt = h / vel_max
   *
   * where vel_max is the maximum velocity in the current
   * model. The condition dt = h/2. should suffice, but
   * be aware the possibility exists for madness and mayhem */
  dt = h / 2.;

  /* loop until time >= end_time */
  // omp_set_num_threads(nthreads);
  // #pragma omp parallel num_threads(nthreads)
  // #pragma acc data copy(uc, uo, un, pebs) // Move data to GPU (copy out is...
                                             //... implicit.)
  while(1)
  {
    /* run a central finite differencing scheme to solve
     * the wave equation in 2D */
    #pragma omp parallel for schedule(static) private(i,j) shared(un, uc, uo, pebs, n) num_threads(nthreads/2)
    for( i = 0; i < n; i++)
    {
      #pragma omp parallel for schedule(static, 2*n/nthreads) shared(un, uc, uo, pebs, n) num_threads(nthreads/2) private(j)
      for( j = 0; j < n; j++)
      {
        /* impose the u|_s = 0 boundary conditions */
        if( i == 0 || i == n - 1 || j == 0 || j == n - 1)
        {
          un[i][j] = 0.;
        }
        /* otherwise do the FD scheme */
        else
        {
          un[i][j] = 2*uc[i][j] - uo[i][j] + VSQR *(dt * dt) *((uc[i][j-1] + uc[i][j+1] + uc[i+1][j] + uc[i-1][j] + 0.25 * (uc[i-1][j-1] + uc[i+1][j-1] + uc[i-1][j+1] + uc[i+1][j+1]) - 5 * uc[i][j])/(h * h) + f(pebs[i][j],t));
        }
      }
    }

    /* update the calculation arrays for the next time step */
    #pragma omp parallel for schedule(static) private(i,j) shared(un, uc, uo, n) num_threads(nthreads/2)
    // #pragma acc parallel loop
    for( i = 0; i < n; i++ )
    {
      #pragma omp parallel for schedule(static, 2*n/nthreads) private(j) shared(n, un, uc, uo)
      for ( j = 0; j < n; j++ )
      {
        uo[i][j] = uc[i][j];
        uc[i][j] = un[i][j];
      }
    }

    /* have we reached the end? */
    if(!tpdt(&t,dt,end_time)) break;
  }

  /* copy the last update to the output array */
  memcpy(u, un, sizeof(double) * n * n);
}

/*****************************
 * init_pebbles
 *
 * Input
 * ----------
 * int pn - the number of pebbles
 * int n - the grid size
 *
 * Output
 * ----------
 * double *p - an array (dimensioned same as the grid) that
 *             gives the initial pebble size.
 *
 * Description
 * ----------
 * init_pebbles creates a random scattering of some pn pebbles,
 * along with a random size. The range of the sizes can be adjusted
 * by changing the constant MAX_PSZ.
 *
 *******************************/
void init_pebbles(double *p, int pn, int n)
{
  int i, j, k, idx;
  int sz;

  srand( time(NULL) );

  /* set to zero */
  memset(p, 0, sizeof(double) * n * n);

  for( k = 0; k < pn ; k++ )
  {
    /* the offset is to ensure that no pebbles
     * are spawned on the very edge of the grid */
    i = rand() % (n - 4) + 2;
    j = rand() % (n - 4) + 2;
    sz = rand() % MAX_PSZ;
    idx = j + i * n;
    p[idx] = (double) sz;
  }
}

/*****************************
 * f
 *
 * Input
 * ----------
 * double p - the initial pebble value
 * double t - the current time
 * Returns
 * ----------
 * the value of the "pebble" source term at time t
 *
 * Description
 * ----------
 * Each pebble's influence on the surface will "fade" as
 * time marches forward (they may sink away, for instance).
 * This function models that - at large t ("large" defined
 * relative to the constant TSCALE) the pebble will have
 * little to no effect.
 *
 * NB: this function can be updated to model whatever behavior
 * you wish the pebbles to have - they could continually jump
 * up and down on the surface, driving more energetic waves, for
 * example.
 ******************************/
double f(double p, double t)
{
  return -expf(-TSCALE * t) * p;
}

int tpdt(double *t, double dt, double tf)
{
  if((*t) + dt > tf) return 0;
  (*t) = (*t) + dt;
  return 1;
}

void init(double *u, double *pebbles, int n)
{
  int i, j, idx;

  for(i = 0; i < n ; i++)
  {
    for(j = 0; j < n ; j++)
    {
      idx = j + i * n;
      u[idx] = f(pebbles[idx], 0.0);
    }
  }
}

/*****************************
 * error_u
 *
 * Input
 * ----------
 * double *ua - the first array to compare
 * double *ub - the second array to compare
 * int n - array extent
 *
 * Output
 * ----------
 * double *uerr - array of errors
 * double *avgerr - pointer to the average error
 *
 * Description
 * ----------
 * Calculates the relative error between ua and ub
 *
 ********************************/
void error_u(double *uerr, double *avgerr, double *ua, double *ub, int n)
{
  int i, j, idx;
  (*avgerr) = 0.;

  for (i = 0; i < n; i++ )
  {
    for (j = 0; j < n; j++ )
    {
      idx = j + i * n;
      uerr[idx] = fabs((ua[idx]-ub[idx])/ua[idx]);
      (*avgerr) = (*avgerr) * ((double)idx/(double)(idx + 1)) + uerr[idx] / (double)(idx + 1);
    }
  }
}

/*****************************
 * print_heatmap
 *
 * Input
 * ----------
 * char *filename - the output file name
 * double *u - the array to output
 * int n - the edge extent of u (ie, u is (n x n))
 * double h - the step size in u
 * Output
 * ----------
 * None
 *
 * Description
 * ----------
 * Outputs the array u to the file filename
 ********************************/
void print_heatmap(char *filename, double *u, int n, double h)
{
  char full_filename[64];
  int i, j, idx;

  dir_string(filename, full_filename);
  FILE *fp = fopen(full_filename, "w");

  for( i = 0; i < n; i++ )
  {
    for( j = 0; j < n; j++ )
    {
      idx = j + i * n;
      fprintf(fp, "%f %f %f\n", i*h, j*h, u[idx]);
    }
  }

  fclose(fp);
}
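/* Editor's sketch (hedged): the CFL remark inside run_sim, written out. The
 * stable step is dt = h / vel_max; run_sim hard-wires vel_max = 2 (dt = h/2.),
 * which holds only while no wave in the model travels faster than that.
 * The helper name is illustrative, not part of the original code. */
double cfl_timestep(double h, double vel_max)
{
  return h / vel_max;  /* run_sim effectively uses cfl_timestep(h, 2.0) */
}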
GB_unaryop__minv_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp32_fp32 // op(A') function: GB_tran__minv_fp32_fp32 // C type: float // A type: float // cast: float cij = (float) aij // unaryop: cij = (1.0F)/aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = (1.0F)/x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp32_fp32 ( float *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
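/* Editor's usage sketch (hedged): applications reach this generated kernel
 * through GrB_apply rather than calling it directly, but invoked by hand
 * (assuming GB.h is in scope, GBCOMPACT is undefined, and the op is not
 * disabled by GB_DISABLE) it behaves like so: */
void demo_minv_fp32 (void)
{
    float Ax [4] = { 1.0F, 2.0F, 4.0F, 8.0F } ;
    float Cx [4] ;
    GrB_Info info = GB_unop__minv_fp32_fp32 (Cx, Ax, 4, 1) ;
    /* on GrB_SUCCESS, Cx is { 1.0F, 0.5F, 0.25F, 0.125F } */
    (void) info ;
}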
MasterBeginLink.c
int x; int main () { #pragma omp master { 11; } #pragma omp master { int x; } }
calcFuncValsC.c
#include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    /* Input variables */
    double *singleton  = mxGetPr(prhs[0]);
    double *prediction = mxGetPr(prhs[1]);

    /* internal variables and pointers */
    double *q_c_singleton = NULL;
    double *q_c_data = NULL;
    double tmp;
    const mwSize *dim_array = mxGetDimensions(prhs[0]);

    int i, j, k, idx;
    int numRows        = dim_array[0];
    int numBounds      = dim_array[1];
    int numColumnsPred = dim_array[2];

    /* logically [numBounds x numColumnsPred], stored as a 1 x (numBounds*numColumnsPred) row vector */
    plhs[0] = mxCreateDoubleMatrix(1, numBounds*numColumnsPred, mxREAL);
    q_c_singleton = mxGetPr(plhs[0]);

    /* logically [numColumnsPred x numBounds], stored as a 1 x (numBounds*numColumnsPred) row vector */
    plhs[1] = mxCreateDoubleMatrix(1, numBounds*numColumnsPred, mxREAL);
    q_c_data = mxGetPr(plhs[1]);

    /* negative entropy of q_c */
    #pragma omp parallel for private(tmp,k,i,idx)
    for (j = 0; j < numColumnsPred; j++) {
        for (k = 0; k < numBounds; k++) {
            tmp = 0;
            idx = j*numBounds*numRows + k*numRows;
            for (i = 0; i < numRows; i++) {
                if (singleton[idx + i] > 0) {
                    tmp += singleton[idx+i]*log(singleton[idx+i]);
                }
            }
            q_c_singleton[k + j*numBounds] = -tmp;
        }
    }

    /* data term (cross term of the prediction under q_c).
     * Note: the inner loops must not reuse the parallel loop variable j,
     * so only k and i are iterated inside each parallel iteration. */
    #pragma omp parallel for private(tmp,k,i,idx)
    for (j = 0; j < numColumnsPred; j++) {
        for (k = 0; k < numBounds; k++) {
            idx = j*numBounds*numRows + k*numRows;
            tmp = 0;
            for (i = 0; i < numRows; i++) {
                if (singleton[idx + i] > 0 && prediction[idx+i] > 0) {
                    tmp += singleton[idx + i]*log(prediction[idx + i]);
                }
            }
            q_c_data[k*numColumnsPred + j] = -tmp;
        }
    }
}
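In other words, for each (column, bound) block the first loop stores the entropy H(q) = -sum_i q_i log q_i and the second stores the cross term -sum_i q_i log p_i. A standalone sketch with toy data (independent of the MEX interface; all names here are illustrative):

/* toy check of the two per-block quantities; X - H equals KL(q||p) >= 0 */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double q[3] = {0.2, 0.3, 0.5};    /* singleton block (a distribution) */
    double p[3] = {0.25, 0.25, 0.5};  /* prediction block */
    double H = 0, X = 0;
    for (int i = 0; i < 3; i++) {
        if (q[i] > 0)             H -= q[i]*log(q[i]);  /* entropy:    -sum q log q */
        if (q[i] > 0 && p[i] > 0) X -= q[i]*log(p[i]);  /* cross term: -sum q log p */
    }
    printf("H=%f X=%f KL=%f\n", H, X, X - H);
    return 0;
}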
GB_binop__times_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__times_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__times_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__times_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint64) // A*D function (colscale): GB (_AxD__times_uint64) // D*A function (rowscale): GB (_DxB__times_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__times_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__times_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint64) // C=scalar+B GB (_bind1st__times_uint64) // C=scalar+B' GB (_bind1st_tran__times_uint64) // C=A+scalar GB (_bind2nd__times_uint64) // C=A'+scalar GB (_bind2nd_tran__times_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_UINT64 || GxB_NO_TIMES_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = 
(*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
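The bind1st/bind2nd kernels above all share one shape: bind one operand of z = x*y to a scalar and stream over the stored values, skipping entries the bitmap marks as absent. A condensed sketch of that pattern outside GraphBLAS (the GBB(Ab,p) test is approximated by the NULL check below):

/* sketch of the bind2nd pattern for z = aij * y on uint64 */
#include <stdint.h>
#include <stddef.h>

static void bind2nd_times_u64(uint64_t *Cx, const uint64_t *Ax,
                              const int8_t *Ab,   /* bitmap, may be NULL */
                              int64_t anz, uint64_t y)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;  /* entry not present */
        Cx [p] = Ax [p] * y ;                  /* GB_BINOP: z = x*y */
    }
}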
ProgressBar.h
#pragma once #include <cassert> #include <cstdint> #include <iostream> #include <ostream> #include <omp.h> // A textual indicator of progress towards some goal. class ProgressBar { public: // Constructs an uninitialized progress bar. explicit ProgressBar(bool verbose = true, std::ostream& os = std::cout) : os(os), numSteps(0), stepsDone(0), percentageDone(0), percentageOutputInterval(20), dotOutputInterval(5), verbose(verbose) {} // Constructs a progress bar with the specified number of steps. template <typename ArithmeticT> explicit ProgressBar(ArithmeticT numSteps, bool verbose = true, std::ostream& os = std::cout) : ProgressBar(verbose, os) { init(numSteps); } // Initialize the progress bar with the specified number of steps. void init(const int64_t steps) { assert(steps >= 0); numSteps = steps; stepsDone = 0; percentageDone = 0; if (verbose) os << "0% " << std::flush; } // Set the percentage points between two printed percentages. void setPercentageOutputInterval(const int points) { percentageOutputInterval = points; } // Set the percentage points between two printed dots. void setDotOutputInterval(const int points) { dotOutputInterval = points; } // Advances the progress bar to the specified step. void advanceTo(const int64_t step) { if (!verbose) return; assert(step >= stepsDone); assert(step <= numSteps); stepsDone = step; print(stepsDone * 100 / numSteps); } // Advances the progress bar to 100 %. void finish() { advanceTo(numSteps); } // Advances the progress bar by one step. void operator++() { if (!verbose) return; int64_t done; #pragma omp atomic capture done = ++stepsDone; #ifdef _OPENMP if (omp_get_thread_num() == 0) #endif print(done * 100 / numSteps); } // Advances the progress bar by the specified number of steps. void operator+=(const int64_t steps) { assert(steps >= 0); if (!verbose) return; int64_t done; #pragma omp atomic capture done = stepsDone += steps; #ifdef _OPENMP if (omp_get_thread_num() == 0) #endif print(done * 100 / numSteps); } private: // Prints the progress bar until the specified percentage. void print(const int until) { assert(until <= 100); for (int i = percentageDone + 1; i <= until; ++i) if (i % percentageOutputInterval == 0) os << " " << i << "% " << std::flush; else if (i % dotOutputInterval == 0) os << "." << std::flush; percentageDone = until; } std::ostream& os; // The output stream the progress bar is printed to. int64_t numSteps; // The number of steps that have to be done. int64_t stepsDone; // The number of steps that have already been done. int percentageDone; // The percentage that has already been done. int percentageOutputInterval; // Percentage points between two printed percentages. int dotOutputInterval; // Percentage points between two printed dots. bool verbose; // Indicates if the progress bar should be printed. };
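A minimal usage sketch for ProgressBar (assuming the header is available as "ProgressBar.h" and the translation unit is compiled with OpenMP): every thread bumps the shared counter through the atomic capture in operator++, and only thread 0 prints.

#include "ProgressBar.h"

int main() {
  const int64_t numItems = 1'000'000;
  ProgressBar bar(numItems);       // prints "0% " immediately
  bar.setDotOutputInterval(10);    // a dot every 10 percentage points

  #pragma omp parallel for
  for (int64_t i = 0; i < numItems; ++i) {
    // ... per-item work ...
    ++bar;                         // atomic increment; thread 0 prints
  }
  bar.finish();                    // advance to 100%
  return 0;
}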
Graph.h
/* * Graph.h * * Created on: 01.06.2014 * Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com) */ #ifndef GRAPH_H_ #define GRAPH_H_ #include <algorithm> #include <vector> #include <stack> #include <queue> #include <utility> #include <stdexcept> #include <functional> #include <unordered_set> #include "../Globals.h" #include "Coordinates.h" #include "../viz/Point.h" #include "../auxiliary/Random.h" #include "../auxiliary/FunctionTraits.h" #include "../auxiliary/Log.h" namespace NetworKit { /** * A weighted edge used for the graph constructor with * initializer list syntax. */ struct WeightedEdge { node u, v; edgeweight weight; WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) { } }; inline bool operator<(const WeightedEdge& e1, const WeightedEdge& e2) { return e1.weight < e2.weight; } struct Edge { node u, v; Edge(node _u, node _v, bool sorted = false) { if (sorted) { u = std::min(_u, _v); v = std::max(_u, _v); } else { u = _u; v = _v; } } }; inline bool operator==(const Edge& e1, const Edge& e2) { return e1.u == e2.u && e1.v == e2.v; } } namespace std { template<> struct hash<NetworKit::Edge> { size_t operator()(const NetworKit::Edge& e) const { return hash_node(e.u) ^ hash_node(e.v); } hash<NetworKit::node> hash_node; }; } namespace NetworKit { /** * @ingroup graph * A graph (with optional weights) and parallel iterator methods. */ class Graph final { friend class ParallelPartitionCoarsening; friend class GraphBuilder; private: // graph attributes count id; //!< unique graph id, starts at 0 std::string name; //!< name of the graph, initially G#ID // scalars count n; //!< current number of nodes count m; //!< current number of edges count storedNumberOfSelfLoops; //!< current number of self loops, edges which have the same origin and target node z; //!< current upper bound of node ids, z will be the id of the next node edgeid omega; //!< current upper bound of edge ids, will be the id of the next edge count t; //!< current time step bool weighted; //!< true if the graph is weighted, false otherwise bool directed; //!< true if the graph is directed, false otherwise bool edgesIndexed; //!< true if edge ids have been assigned // per node data std::vector<bool> exists; //!< exists[v] is true if node v has not been removed from the graph Coordinates<float> coordinates; //!< coordinates of nodes (if present) std::vector<count> inDeg; //!< only used for directed graphs, number of edges incoming per node std::vector<count> outDeg; //!< degree of every node, zero if node was removed. For directed graphs only outgoing edges count std::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v) std::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v] std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges std::vector< std::vector<edgeid> > inEdgeIds; //!< only used for directed graphs, same schema as inEdges std::vector< std::vector<edgeid> > outEdgeIds; //!< same schema (and same order!) as outEdges /** * Returns the next unique graph id. */ count getNextGraphId(); /** * Returns the index of node u in the array of incoming edges of node v. 
(for directed graphs inEdges is searched, while for undirected outEdges is searched, which gives the same result as indexInOutEdgeArray). */ index indexInInEdgeArray(node v, node u) const; /** * Returns the index of node v in the array of outgoing edges of node u. */ index indexInOutEdgeArray(node u, node v) const; /** * Returns the edge weight of the outgoing edge of index i in the outgoing edges of node u * @param u The node * @param i The index * @return The weight of the outgoing edge or defaultEdgeWeight if the graph is unweighted */ template<bool hasWeights> inline edgeweight getOutEdgeWeight(node u, index i) const; /** * Returns the edge weight of the incoming edge of index i in the incoming edges of node u * * @param u The node * @param i The index in the incoming edge array * @return The weight of the incoming edge */ template<bool hasWeights> inline edgeweight getInEdgeWeight(node u, index i) const; /** * Returns the edge id of the edge of index i in the outgoing edges of node u * * @param u The node * @param i The index in the outgoing edges * @return The edge id */ template<bool graphHasEdgeIds> inline edgeid getOutEdgeId(node u, index i) const; /** * Returns the edge id of the edge of index i in the incoming edges of node u * * @param u The node * @param i The index in the incoming edges of u * @return The edge id */ template<bool graphHasEdgeIds> inline edgeid getInEdgeId(node u, index i) const; /** * @brief Returns if the edge (u, v) shall be used in the iteration of all edges * * @param u The source node of the edge * @param v The target node of the edge * @return If the edge shall be used, i.e. if v is not none and in the undirected case if u >= v */ template<bool graphIsDirected> inline bool useEdgeInIteration(node u, node v) const; /** * @brief Implementation of the for loop for outgoing edges of u * * Note: If all (valid) outgoing edges shall be considered, graphIsDirected needs to be set to true * * @param u The node * @param handle The handle that shall be executed for each edge * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void forOutEdgesOfImpl(node u, L handle) const; /** * @brief Implementation of the for loop for incoming edges of u * * For undirected graphs, this is the same as forOutEdgesOfImpl but u and v are changed in the handle * * @param u The node * @param handle The handle that shall be executed for each edge * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void forInEdgesOfImpl(node u, L handle) const; /** * @brief Implementation of the for loop for all edges, @see forEdges * * @param handle The handle that shall be executed for all edges * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void forEdgeImpl(L handle) const; /** * @brief Parallel implementation of the for loop for all edges, @see parallelForEdges * * @param handle The handle that shall be executed for all edges * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void parallelForEdgesImpl(L handle) const; /** * @brief Summation variant of the parallel for loop for all edges, @see parallelSumForEdges * * @param handle The handle that shall be executed for all edges * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline double parallelSumForEdgesImpl(L handle) const; /* * In the following definition, 
Aux::FunctionTraits is used in order to only execute lambda functions * with the appropriate parameters. The decltype-return type is used for determining the return type of * the lambda (needed for summation) but also determines if the lambda accepts the correct number of parameters. * Otherwise the return type declaration fails and the function is excluded from overload resolution. * Then there are multiple possible lambdas with three (third parameter id or weight) and two (second parameter * can be second node id or edge weight for neighbor iterators). This is checked using Aux::FunctionTraits and * std::enable_if. std::enable_if only defines the type member when the given bool is true, this bool comes from * std::is_same which compares two types. The function traits give either the parameter type or if it is out of bounds * they define type as void. */ /** * Triggers a static assert error when no other method is chosen. Because of the use of "..." as arguments, the priority * of this method is lower than the priority of the other methods. This method avoids ugly and unreadable template substitution * error messages from the other declarations. */ template<class F, void* = (void*)0> typename Aux::FunctionTraits<F>::result_type edgeLambda(F&f, ...) const { // the strange condition is used in order to delay the evaluation of the static assert to the moment when this function is actually used static_assert(! std::is_same<F, F>::value, "Your lambda does not support the required parameters or the parameters have the wrong type."); return std::declval<typename Aux::FunctionTraits<F>::result_type>(); // use the correct return type (this won't compile) } /** * Calls the given function f if its fourth argument is of the type edgeid and third of type edgeweight * Note that the decltype check is not enough as edgeweight can be casted to node and we want to assure that the third and fourth parameters really have the types edgeweight and edgeid. */ template < class F, typename std::enable_if < (Aux::FunctionTraits<F>::arity >= 3) && std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value && std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<3>::type>::value >::type * = (void*)0 > auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew, id)) { return f(u, v, ew, id); } /** * Calls the given function f if its third argument is of the type edgeid, discards the edge weight * Note that the decltype check is not enough as edgeweight can be casted to node. */ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 2) && std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<2>::type>::value && std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value /* prevent f(v, weight, eid) */ >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, id)) { return f(u, v, id); } /** * Calls the given function f if its third argument is of type edgeweight, discards the edge id * Note that the decltype check is not enough as node can be casted to edgeweight. 
*/ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 2) && std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew)) { return f(u, v, ew); } /** * Calls the given function f if it has only two arguments and the second argument is of type node, * discards edge weight and id * Note that the decltype check is not enough as edgeweight can be casted to node. */ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 1) && std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v)) { return f(u, v); } /** * Calls the given function f if it has only two arguments and the second argument is of type edgeweight, * discards the first node and the edge id * Note that the decltype check is not enough as edgeweight can be casted to node. */ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 1) && std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<1>::type>::value >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, ew)) { return f(v, ew); } /** * Calls the given function f if it has only one argument, discards the first * node id, the edge weight and the edge id */ template<class F, void* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(v)) { return f(v); } /** * Calls the given BFS handle with distance parameter */ template <class F> auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) { return f(u, dist); } /** * Calls the given BFS handle without distance parameter */ template <class F> auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u)) { return f(u); } public: /** * Create a graph of @a n nodes. The graph has assignable edge weights if @a weighted is set to <code>true</code>. * If @a weighted is set to <code>false</code> each edge has edge weight 1.0 and any other weight assignment will * be ignored. * @param n Number of nodes. * @param weighted If set to <code>true</code>, the graph has edge weights. * @param directed If set to @c true, the graph will be directed. */ Graph(count n = 0, bool weighted = false, bool directed = false); Graph(const Graph& G, bool weighted, bool directed); /** * Generate a weighted graph from a list of edges. (Useful for small * graphs in unit tests that you do not want to read from a file.) * * @param[in] edges list of weighted edges */ Graph(std::initializer_list<WeightedEdge> edges); /** * Create a graph as copy of @a other. * @param other The graph to copy. */ Graph(const Graph& other) = default; /** Default move constructor */ Graph(Graph&& other) = default; /** Default destructor */ ~Graph() = default; /** Default move assignment operator */ Graph& operator=(Graph&& other) = default; /** Default copy assignment operator */ Graph& operator=(const Graph& other) = default; /** EDGE IDS **/ /** * Initially assign integer edge identifiers. * * @param force Force re-indexing of edges even if they have already been indexed */ void indexEdges(bool force = false); /** * Checks if edges have been indexed * * @return bool if edges have been indexed */ bool hasEdgeIds() const { return edgesIndexed; } /** * Get the id of the given edge. 
*/ edgeid edgeId(node u, node v) const; /** * Get an upper bound for the edge ids in the graph. * @return An upper bound for the edge ids. */ index upperEdgeIdBound() const { return omega; } /** GRAPH INFORMATION **/ /** * Get the ID of this graph. The ID is a unique unsigned integer given to * every graph on construction. */ count getId() const { return id; } /** * Return the type of the graph. * Graph: not weighted, undirected * WeightedGraph: weighted, undirected * DirectedGraph: not weighted, directed * WeightedDirectedGraph: weighted, directed */ std::string typ() const; /** * Try to save some memory by shrinking internal data structures of the graph. Only run this * once you finished editing the graph. Otherwise it will cause unnecessary reallocation of * memory. */ void shrinkToFit(); /** * Compacts the adjacency arrays by re-using no longer needed slots from deleted edges. */ void compactEdges(); /** * Sorts the adjacency arrays by node id. While the running time is linear this * temporarily duplicates the memory. */ void sortEdges(); /** * Set name of graph to @a name. * @param name The name. */ void setName(std::string name) { this->name = name; } /* * Returns the name of the graph. * @return The name of the graph. */ std::string getName() const { return name; } /** * Returns a string representation of the graph. * @return A string representation. */ std::string toString() const; /* COPYING */ /* * Copies all nodes to a new graph * @return graph with the same nodes. */ Graph copyNodes() const; /* NODE MODIFIERS */ /** * Add a new node to the graph and return it. * @return The new node. */ node addNode(); /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Add a new node to the graph with coordinates @a x and @a y and return it. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] node addNode(float x, float y); /** * Remove an isolated node @a v from the graph. * * @param v Node. * @note Although it would be convenient to remove all incident edges at the same time, * this causes complications for dynamic applications. Therefore, removeNode is an * atomic event. All incident edges need to be removed first and an exception is thrown * otherwise. */ void removeNode(node v); /** * Check if node @a v exists in the graph. * * @param v Node. * @return @c true if @a v exists, @c false otherwise. */ bool hasNode(node v) const { return (v < z) && this->exists[v]; } /** * Restores a previously deleted node @a v with its previous id in the graph. * * @param v Node. * */ void restoreNode(node v); // SET OPERATIONS /** * Appends another graph to this graph as a new subgraph. Performs node * id remapping. * @param G [description] */ void append(const Graph& G); /** * Modifies this graph to be the union of it and another graph. * Nodes with the same ids are identified with each other. * @param G [description] */ void merge(const Graph& G); // SUBGRAPHS Graph subgraphFromNodes(const std::unordered_set<node>& nodes) const; /** NODE PROPERTIES **/ /** * Returns the number of outgoing neighbors of @a v. * * @param v Node. * @return The number of outgoing neighbors. */ count degree(node v) const { return outDeg[v]; } /** * Get the number of incoming neighbors of @a v. * * @param v Node. * @return The number of incoming neighbors. * @note If the graph is not directed, the outgoing degree is returned. */ count degreeIn(node v) const { return directed ? 
inDeg[v] : outDeg[v]; } /** * Get the number of outgoing neighbors of @a v. * * @param v Node. * @return The number of outgoing neighbors. */ count degreeOut(node v) const { return outDeg[v]; } /** * Check whether @a v is isolated, i.e. degree is 0. * @param v Node. * @return @c true if the node is isolated (= degree is 0) */ bool isIsolated(node v) const { return outDeg[v] == 0 && (!directed || inDeg[v] == 0); } /** * Returns the weighted degree of @a v. * * @param v Node. * @return Weighted degree of @a v. * @note For directed graphs this is the sum of weights of all outgoing edges of @a v. */ edgeweight weightedDegree(node v) const; /** * Returns the volume of @a v, which is the weighted degree with self-loops counted twice. * * @param v Node. * @return The volume of @a v. */ edgeweight volume(node v) const; /** * Returns a random node of the graph. * @return A random node. */ node randomNode() const; /** * Returns a random neighbor of @a u and @c none if degree is zero. * * @param u Node. * @return A random neighbor of @a u. */ node randomNeighbor(node u) const; /* EDGE MODIFIERS */ /** * Insert an edge between the nodes @a u and @a v. If the graph is weighted you can optionally * set a weight for this edge. The default weight is 1.0. * Note: Multi-edges are not supported and will NOT be handled consistently by the graph data * structure. * @param u Endpoint of edge. * @param v Endpoint of edge. * @param weight Optional edge weight. */ void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight); /** * Removes the undirected edge {@a u,@a v}. * @param u Endpoint of edge. * @param v Endpoint of edge. */ void removeEdge(node u, node v); /** * Removes all self-loops in the graph. */ void removeSelfLoops(); /** * Changes the edge {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2, @a t2} into {@a s2, @a t1}. * * If there are edge weights or edge ids, they are preserved. Note that no check is performed if the swap is actually possible, i.e. does not generate duplicate edges. * * @param s1 The first source * @param t1 The first target * @param s2 The second source * @param t2 The second target */ void swapEdge(NetworKit::node s1, NetworKit::node t1, NetworKit::node s2, NetworKit::node t2); /** * Checks if undirected edge {@a u,@a v} exists in the graph. * @param u Endpoint of edge. * @param v Endpoint of edge. * @return <code>true</code> if the edge exists, <code>false</code> otherwise. */ bool hasEdge(node u, node v) const; /** * Returns a random edge. By default a random node u is chosen and then some random neighbor v. So the probability of choosing (u, v) highly * depends on the degree of u. * Setting uniformDistribution to true will give you a truly uniformly distributed edge, but will be very slow. So only use uniformDistribution * for single calls outside of any loops. */ std::pair<node, node> randomEdge(bool uniformDistribution = false) const; /** * Returns a vector with nr random edges. The edges are chosen uniformly at random. */ std::vector< std::pair<node, node> > randomEdges(count nr) const; /* GLOBAL PROPERTIES */ /** * Returns <code>true</code> if this graph supports edge weights other than 1.0. * @return <code>true</code> if this graph supports edge weights other than 1.0. */ bool isWeighted() const { return weighted; } /** * Return @c true if this graph supports directed edges. * @return @c true if this graph supports directed edges. */ bool isDirected() const { return directed; } /** * Return <code>true</code> if graph contains no nodes. 
* @return <code>true</code> if graph contains no nodes. */ bool isEmpty() const { return n == 0; } /** * Return the number of nodes in the graph. * @return The number of nodes. */ count numberOfNodes() const { return n; } /** * Return the number of edges in the graph. * @return The number of edges. */ count numberOfEdges() const { return m; } /** * @return a pair (n, m) where n is the number of nodes and m is the number of edges */ std::pair<count, count> const size() { return {n, m}; }; /** * @return the density of the graph */ double density() const { count n = numberOfNodes(); count m = numberOfEdges(); count loops = numberOfSelfLoops(); m -= loops; double d; if (isDirected()) { d = m / (double) (n * (n-1)); } else { d = (2 * m) / (double) (n * (n-1)); } return d; } /** * Return the number of loops {v,v} in the graph. * @return The number of loops. * @note This involves calculation, so store result if needed multiple times. */ count numberOfSelfLoops() const; /** * Get an upper bound for the node ids in the graph. * @return An upper bound for the node ids. */ index upperNodeIdBound() const { return z; } /** * Check for invalid graph states, such as multi-edges. * @return False if the graph is in invalid state. */ bool checkConsistency() const; /* DYNAMICS */ /** * Trigger a time step - increments counter. */ void timeStep() { t++; } /** * Get time step counter. * @return Time step counter. */ count time() { return t; } /* COORDINATES */ /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Sets the coordinate of @a v to @a value. * * @param v Node. * @param value The coordinate of @a v. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] void setCoordinate(node v, Point<float> value) { coordinates.setCoordinate(v, value); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Get the coordinate of @a v. * @param v Node. * @return The coordinate of @a v. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] Point<float>& getCoordinate(node v) { return coordinates.getCoordinate(v); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Get minimum coordinate of all coordinates with respect to dimension @a dim. * @param dim The dimension to search for minimum. * @return The minimum coordinate in dimension @a dim. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] float minCoordinate(count dim) { return coordinates.minCoordinate(dim); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Get maximum coordinate of all coordinates with respect to dimension @a dim. * @param dim The dimension to search for maximum. * @return The maximum coordinate in dimension @a dim. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Initializes the coordinates for the nodes in graph. * @note This has to be called once and before you set coordinates. 
Call this method again if new nodes have * been added. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] void initCoordinates() { coordinates.init(z); } /* EDGE ATTRIBUTES */ /** * Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist. * BEWARE: Running time is \Theta(deg(u))! * * @param u Endpoint of edge. * @param v Endpoint of edge. * @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist. */ edgeweight weight(node u, node v) const; /** * Set the weight of an edge. If the edge does not exist, * it will be inserted. * * @param[in] u endpoint of edge * @param[in] v endpoint of edge * @param[in] weight edge weight */ void setWeight(node u, node v, edgeweight ew); /** * Increase the weight of an edge. If the edge does not exist, * it will be inserted. * * @param[in] u endpoint of edge * @param[in] v endpoint of edge * @param[in] weight edge weight */ void increaseWeight(node u, node v, edgeweight ew); /* SUMS */ /** * Returns the sum of all edge weights. * @return The sum of all edge weights. */ edgeweight totalEdgeWeight() const; /* Collections */ /** * Get list of all nodes. * @return List of all nodes. */ std::vector<node> nodes() const; /** * Get list of edges as node pairs. * @return List of edges as node pairs. */ std::vector<std::pair<node, node> > edges() const; /** * Get list of neighbors of @a u. * * @param u Node. * @return List of neighbors of @a u. */ std::vector<node> neighbors(node u) const; /** * Get i-th (outgoing) neighbor of @a u. * WARNING: This function is deprecated or only temporary. * * @param u Node. * @param i index; should be in [0, degreeOut(u)) * @return @a i -th (outgoing) neighbor of @a u, or @c none if no such * neighbor exists. */ template<bool graphIsDirected> node getIthNeighbor(node u, index i) const { node v = outEdges[u][i]; if (useEdgeInIteration<graphIsDirected>(u, v)) return v; else return none; } /* Derivative Graphs */ /** * Return an undirected version of this graph. * * @return undirected graph. */ Graph toUndirected() const; /** * Return an unweighted version of this graph. * * @return unweighted graph. */ Graph toUnweighted() const; /** * Return the transpose of this graph. The graph must be directed. * * @return transpose of the graph. */ Graph transpose() const; /* NODE ITERATORS */ /** * Iterate over all nodes of the graph and call @a handle (lambda closure). * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void forNodes(L handle) const; /** * Iterate randomly over all nodes of the graph and call @a handle (lambda closure). * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void parallelForNodes(L handle) const; /** Iterate over all nodes of the graph and call @a handle (lambda closure) as long as @a condition remains true. * This allows for breaking from a node loop. * * @param condition Returning <code>false</code> breaks the loop. * @param handle Takes parameter <code>(node)</code>. */ template<typename C, typename L> void forNodesWhile(C condition, L handle) const; /** * Iterate randomly over all nodes of the graph and call @a handle (lambda closure). * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void forNodesInRandomOrder(L handle) const; /** * Iterate in parallel over all nodes of the graph and call handler (lambda closure). * Using schedule(guided) to remedy load-imbalances due to e.g. unequal degree distribution. 
* * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void balancedParallelForNodes(L handle) const; /** * Iterate over all undirected pairs of nodes and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code>. */ template<typename L> void forNodePairs(L handle) const; /** * Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code>. */ template<typename L> void parallelForNodePairs(L handle) const; /* EDGE ITERATORS */ /** * Iterate over all edges of the const graph and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>. */ template<typename L> void forEdges(L handle) const; /** * Iterate in parallel over all edges of the const graph and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code> or <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>. */ template<typename L> void parallelForEdges(L handle) const; /* NEIGHBORHOOD ITERATORS */ /** * Iterate over all neighbors of a node and call @a handle (lambda closure). * * @param u Node. * @param handle Takes parameter <code>(node)</code> or <code>(node, edgeweight)</code> which is a neighbor of @a u. * @note For directed graphs only outgoing edges from @a u are considered. * A node is its own neighbor if there is a self-loop. * */ template<typename L> void forNeighborsOf(node u, L handle) const; /** * Iterate over all incident edges of a node and call @a handle (lambda closure). * * @param u Node. * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code> where the first node is @a u and the second is a neighbor of @a u. * @note For undirected graphs all edges incident to @a u are also outgoing edges. */ template<typename L> void forEdgesOf(node u, L handle) const; /** * Iterate over all neighbors of a node and call handler (lambda closure). * For directed graphs only incoming edges of u are considered. */ template<typename L> void forInNeighborsOf(node u, L handle) const; /** * Iterate over all incoming edges of a node and call handler (lambda closure). * @note For undirected graphs all edges incident to u are also incoming edges. * * Handle takes parameters (u, v) or (u, v, w) where w is the edge weight. */ template<typename L> void forInEdgesOf(node u, L handle) const; /* REDUCTION ITERATORS */ /** * Iterate in parallel over all nodes and sum (reduce +) the values returned by the handler */ template<typename L> double parallelSumForNodes(L handle) const; /** * Iterate in parallel over all edges and sum (reduce +) the values returned by the handler */ template<typename L> double parallelSumForEdges(L handle) const; /* GRAPH SEARCHES */ /** * Iterate over nodes in breadth-first search order starting from r until connected component * of r has been visited. * * @param r Node. * @param handle Takes parameter <code>(node)</code>. 
*/ template<typename L> void BFSfrom(node r, L handle) const; template<typename L> void BFSfrom(const std::vector<node> &startNodes, L handle) const; template<typename L> void BFSEdgesFrom(node r, L handle) const; /** * Iterate over nodes in depth-first search order starting from r until connected component * of r has been visited. * * @param r Node. * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void DFSfrom(node r, L handle) const; template<typename L> void DFSEdgesFrom(node r, L handle) const; }; /* NODE ITERATORS */ template<typename L> void Graph::forNodes(L handle) const { for (node v = 0; v < z; ++v) { if (exists[v]) { handle(v); } } } template<typename L> void Graph::parallelForNodes(L handle) const { #pragma omp parallel for for (node v = 0; v < z; ++v) { if (exists[v]) { handle(v); } } } template<typename C, typename L> void Graph::forNodesWhile(C condition, L handle) const { for (node v = 0; v < z; ++v) { if (exists[v]) { if (!condition()) { break; } handle(v); } } } template<typename L> void Graph::forNodesInRandomOrder(L handle) const { std::vector<node> randVec = nodes(); std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG()); for (node v : randVec) { handle(v); } } template<typename L> void Graph::balancedParallelForNodes(L handle) const { #pragma omp parallel for schedule(guided) // TODO: define min block size (and test it!) for (node v = 0; v < z; ++v) { if (exists[v]) { handle(v); } } } template<typename L> void Graph::forNodePairs(L handle) const { for (node u = 0; u < z; ++u) { if (exists[u]) { for (node v = u + 1; v < z; ++v) { if (exists[v]) { handle(u, v); } } } } } template<typename L> void Graph::parallelForNodePairs(L handle) const { #pragma omp parallel for schedule(guided) for (node u = 0; u < z; ++u) { if (exists[u]) { for (node v = u + 1; v < z; ++v) { if (exists[v]) { handle(u, v); } } } } } /* EDGE ITERATORS */ /* HELPERS */ template<bool hasWeights> // implementation for weighted == true inline edgeweight Graph::getOutEdgeWeight(node u, index i) const { return outEdgeWeights[u][i]; } template<> // implementation for weighted == false inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const { return defaultEdgeWeight; } template<bool hasWeights> // implementation for weighted == true inline edgeweight Graph::getInEdgeWeight(node u, index i) const { return inEdgeWeights[u][i]; } template<> // implementation for weighted == false inline edgeweight Graph::getInEdgeWeight<false>(node, index) const { return defaultEdgeWeight; } template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true inline edgeid Graph::getOutEdgeId(node u, index i) const { return outEdgeIds[u][i]; } template<> // implementation for hasEdgeIds == false inline edgeid Graph::getOutEdgeId<false>(node, index) const { return 0; } template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true inline edgeid Graph::getInEdgeId(node u, index i) const { return inEdgeIds[u][i]; } template<> // implementation for hasEdgeIds == false inline edgeid Graph::getInEdgeId<false>(node, index) const { return 0; } template<bool graphIsDirected> // implementation for graphIsDirected == true inline bool Graph::useEdgeInIteration(node u, node v) const { return v != none; } template<> // implementation for graphIsDirected == false inline bool Graph::useEdgeInIteration<false>(node u, node v) const { return u >= v; } template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void Graph::forOutEdgesOfImpl(node u, L 
handle) const { for (index i = 0; i < outEdges[u].size(); ++i) { node v = outEdges[u][i]; if (useEdgeInIteration<graphIsDirected>(u, v)) { edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i)); } } } template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void Graph::forInEdgesOfImpl(node u, L handle) const { if (graphIsDirected) { for (index i = 0; i < inEdges[u].size(); i++) { node v = inEdges[u][i]; if (useEdgeInIteration<true>(u, v)) { edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i), getInEdgeId<graphHasEdgeIds>(u, i)); } } } else { for (index i = 0; i < outEdges[u].size(); ++i) { node v = outEdges[u][i]; if (useEdgeInIteration<true>(u, v)) { edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i)); } } } } template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void Graph::forEdgeImpl(L handle) const { for (node u = 0; u < z; ++u) { forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle); } } template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void Graph::parallelForEdgesImpl(L handle) const { #pragma omp parallel for schedule(guided) for (node u = 0; u < z; ++u) { forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle); } } template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline double Graph::parallelSumForEdgesImpl(L handle) const { double sum = 0.0; #pragma omp parallel for reduction(+:sum) for (node u = 0; u < z; ++u) { for (index i = 0; i < outEdges[u].size(); ++i) { node v = outEdges[u][i]; // undirected, do not iterate over edges twice // {u, v} instead of (u, v); if v == none, u > v is not fulfilled if (useEdgeInIteration<graphIsDirected>(u, v)) { sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i)); } } } return sum; } template<typename L> void Graph::forEdges(L handle) const { switch (weighted + 2 * directed + 4 * edgesIndexed) { case 0: // unweighted, undirected, no edgeIds forEdgeImpl<false, false, false, L>(handle); break; case 1: // weighted, undirected, no edgeIds forEdgeImpl<false, true, false, L>(handle); break; case 2: // unweighted, directed, no edgeIds forEdgeImpl<true, false, false, L>(handle); break; case 3: // weighted, directed, no edgeIds forEdgeImpl<true, true, false, L>(handle); break; case 4: // unweighted, undirected, with edgeIds forEdgeImpl<false, false, true, L>(handle); break; case 5: // weighted, undirected, with edgeIds forEdgeImpl<false, true, true, L>(handle); break; case 6: // unweighted, directed, with edgeIds forEdgeImpl<true, false, true, L>(handle); break; case 7: // weighted, directed, with edgeIds forEdgeImpl<true, true, true, L>(handle); break; } } template<typename L> void Graph::parallelForEdges(L handle) const { switch (weighted + 2 * directed + 4 * edgesIndexed) { case 0: // unweighted, undirected, no edgeIds parallelForEdgesImpl<false, false, false, L>(handle); break; case 1: // weighted, undirected, no edgeIds parallelForEdgesImpl<false, true, false, L>(handle); break; case 2: // unweighted, directed, no edgeIds parallelForEdgesImpl<true, false, false, L>(handle); break; case 3: // weighted, directed, no edgeIds parallelForEdgesImpl<true, true, false, L>(handle); break; case 4: // unweighted, undirected, with edgeIds parallelForEdgesImpl<false, false, true, L>(handle); break; case 5: // weighted, 
undirected, with edgeIds parallelForEdgesImpl<false, true, true, L>(handle); break; case 6: // unweighted, directed, with edgeIds parallelForEdgesImpl<true, false, true, L>(handle); break; case 7: // weighted, directed, with edgeIds parallelForEdgesImpl<true, true, true, L>(handle); break; } } /* NEIGHBORHOOD ITERATORS */ template<typename L> void Graph::forNeighborsOf(node u, L handle) const { forEdgesOf(u, handle); } template<typename L> void Graph::forEdgesOf(node u, L handle) const { switch (weighted + 2 * edgesIndexed) { case 0: //not weighted, no edge ids forOutEdgesOfImpl<true, false, false, L>(u, handle); break; case 1: //weighted, no edge ids forOutEdgesOfImpl<true, true, false, L>(u, handle); break; case 2: //not weighted, with edge ids forOutEdgesOfImpl<true, false, true, L>(u, handle); break; case 3: //weighted, with edge ids forOutEdgesOfImpl<true, true, true, L>(u, handle); break; } } template<typename L> void Graph::forInNeighborsOf(node u, L handle) const { forInEdgesOf(u, handle); } template<typename L> void Graph::forInEdgesOf(node u, L handle) const { switch (weighted + 2 * directed + 4 * edgesIndexed) { case 0: //unweighted, undirected, no edge ids forInEdgesOfImpl<false, false, false, L>(u, handle); break; case 1: //weighted, undirected, no edge ids forInEdgesOfImpl<false, true, false, L>(u, handle); break; case 2: //unweighted, directed, no edge ids forInEdgesOfImpl<true, false, false, L>(u, handle); break; case 3: //weighted, directed, no edge ids forInEdgesOfImpl<true, true, false, L>(u, handle); break; case 4: //unweighted, undirected, with edge ids forInEdgesOfImpl<false, false, true, L>(u, handle); break; case 5: //weighted, undirected, with edge ids forInEdgesOfImpl<false, true, true, L>(u, handle); break; case 6: //unweighted, directed, with edge ids forInEdgesOfImpl<true, false, true, L>(u, handle); break; case 7: //weighted, directed, with edge ids forInEdgesOfImpl<true, true, true, L>(u, handle); break; } } /* REDUCTION ITERATORS */ template<typename L> double Graph::parallelSumForNodes(L handle) const { double sum = 0.0; #pragma omp parallel for reduction(+:sum) for (node v = 0; v < z; ++v) { if (exists[v]) { sum += handle(v); } } return sum; } template<typename L> double Graph::parallelSumForEdges(L handle) const { double sum = 0.0; switch (weighted + 2 * directed + 4 * edgesIndexed) { case 0: // unweighted, undirected, no edge ids sum = parallelSumForEdgesImpl<false, false, false, L>(handle); break; case 1: // weighted, undirected, no edge ids sum = parallelSumForEdgesImpl<false, true, false, L>(handle); break; case 2: // unweighted, directed, no edge ids sum = parallelSumForEdgesImpl<true, false, false, L>(handle); break; case 3: // weighted, directed, no edge ids sum = parallelSumForEdgesImpl<true, true, false, L>(handle); break; case 4: // unweighted, undirected, with edge ids sum = parallelSumForEdgesImpl<false, false, true, L>(handle); break; case 5: // weighted, undirected, with edge ids sum = parallelSumForEdgesImpl<false, true, true, L>(handle); break; case 6: // unweighted, directed, with edge ids sum = parallelSumForEdgesImpl<true, false, true, L>(handle); break; case 7: // weighted, directed, with edge ids sum = parallelSumForEdgesImpl<true, true, true, L>(handle); break; } return sum; } /* GRAPH SEARCHES */ template<typename L> void Graph::BFSfrom(node r, L handle) const { std::vector<node> startNodes(1, r); BFSfrom(startNodes, handle); } template<typename L> void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const { 
std::vector<bool> marked(z); std::queue<node> q, qNext; count dist = 0; // enqueue start nodes for (node u : startNodes) { q.push(u); marked[u] = true; } do { node u = q.front(); q.pop(); // apply function callBFSHandle(handle, u, dist); forNeighborsOf(u, [&](node v) { if (!marked[v]) { qNext.push(v); marked[v] = true; } }); if (q.empty() && !qNext.empty()) { q.swap(qNext); ++dist; } } while (!q.empty()); } template<typename L> void Graph::BFSEdgesFrom(node r, L handle) const { std::vector<bool> marked(z); std::queue<node> q; q.push(r); // enqueue root marked[r] = true; do { node u = q.front(); q.pop(); // apply function forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) { if (!marked[v]) { handle(u, v, w, eid); q.push(v); marked[v] = true; } }); } while (!q.empty()); } template<typename L> void Graph::DFSfrom(node r, L handle) const { std::vector<bool> marked(z); std::stack<node> s; s.push(r); // enqueue root marked[r] = true; do { node u = s.top(); s.pop(); // apply function handle(u); forNeighborsOf(u, [&](node v) { if (!marked[v]) { s.push(v); marked[v] = true; } }); } while (!s.empty()); } template<typename L> void Graph::DFSEdgesFrom(node r, L handle) const { std::vector<bool> marked(z); std::stack<node> s; s.push(r); // enqueue root marked[r] = true; do { node u = s.top(); s.pop(); // apply function forNeighborsOf(u, [&](node v) { if (!marked[v]) { handle(u, v); s.push(v); marked[v] = true; } }); } while (!s.empty()); } } /* namespace NetworKit */ #endif /* GRAPH_H_ */
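Graph::BFSfrom above advances its distance counter with a two-queue scheme: the current frontier sits in q, newly discovered neighbors accumulate in qNext, and dist is incremented exactly once per level, when q drains and the two queues are swapped. A minimal sketch of the same technique in C, assuming a plain adjacency-array representation (adj, deg, n, and bfs_levels are illustrative names, not NetworKit API):

#include <stdlib.h>

/* adj[u] lists the neighbors of u, deg[u] is their count, n is the node count.
 * dist[v] receives the BFS level of v, or -1 if v is unreachable from root.
 * Allocation failures are ignored for brevity. */
static void bfs_levels(int n, int *const *adj, const int *deg, int root, int *dist)
{
    int *q = malloc((size_t) n * sizeof *q);      /* current level */
    int *qNext = malloc((size_t) n * sizeof *q);  /* next level    */
    char *marked = calloc((size_t) n, 1);
    int qLen = 1, nextLen = 0, level = 0;

    for (int v = 0; v < n; v++) dist[v] = -1;
    q[0] = root;
    marked[root] = 1;
    while (qLen > 0) {
        for (int i = 0; i < qLen; i++) {
            int u = q[i];
            dist[u] = level;                      /* the "handle(u, dist)" call site */
            for (int j = 0; j < deg[u]; j++) {
                int v = adj[u][j];
                if (!marked[v]) { marked[v] = 1; qNext[nextLen++] = v; }
            }
        }
        int *tmp = q; q = qNext; qNext = tmp;     /* q.swap(qNext) in BFSfrom */
        qLen = nextLen; nextLen = 0;
        ++level;                                  /* ++dist in BFSfrom */
    }
    free(q); free(qNext); free(marked);
}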
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. */ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. */ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[4], zero; RectangleInfo bounds; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); artifact=GetImageArtifact(image, "trim:edges"); if (artifact == (const char *) NULL) { bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; } else { char *edges, *p, *q; bounds.width=(size_t) image->columns; bounds.height=(size_t) image->rows; bounds.x=0; bounds.y=0; edges=AcquireString(artifact); q=edges; while ((p=StringToken(",",&q)) != (char *) NULL) { if (LocaleCompare(p,"north") == 0) bounds.y=(ssize_t) image->rows; if (LocaleCompare(p,"east") == 0) bounds.width=0; if (LocaleCompare(p,"south") == 0) bounds.height=0; if (LocaleCompare(p,"west") == 0) bounds.x=(ssize_t) image->columns; } edges=DestroyString(edges); } GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t) image->rows-1,1,1,exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[3]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for 
(x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; if ((x < (ssize_t) bounding_box.width) && (y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse)) { bounding_box.width=(size_t) x; bounding_box.height=(size_t) y; } p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. % % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; ssize_t i; /* Most dominant color of edges/corners is the background color of the image. 
*/ artifact=GetImageArtifact(image,"convex-hull:background-color"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"background"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } edge_census=(-1.0); for (i=0; i < 4; i++) if (census[i] > edge_census) { edge_background=background[i]; edge_census=census[i]; } return(edge_background); } void TraceConvexHull(PointInfo *vertices,size_t number_vertices, PointInfo ***monotone_chain,size_t *chain_length) { PointInfo **chain; ssize_t i; size_t demark, n; /* Construct the upper and lower hulls: rightmost to leftmost counterclockwise. */ chain=(*monotone_chain); n=0; for (i=0; i < (ssize_t) number_vertices; i++) { while ((n >= 2) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } demark=n+1; for (i=(ssize_t) number_vertices-2; i >= 0; i--) { while ((n >= demark) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } *chain_length=n; } MagickExport PointInfo *GetImageConvexHull(const Image *image, size_t *number_vertices,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MemoryInfo *monotone_info, *vertices_info; PixelInfo background; PointInfo *convex_hull, **monotone_chain, *vertices; size_t n; ssize_t y; /* Identify convex hull vertices of image foreground object(s). 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(*vertices)); monotone_info=AcquireVirtualMemory(2*image->columns,2* image->rows*sizeof(*monotone_chain)); if ((vertices_info == (MemoryInfo *) NULL) || (monotone_info == (MemoryInfo *) NULL)) { if (monotone_info != (MemoryInfo *) NULL) monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info); if (vertices_info != (MemoryInfo *) NULL) vertices_info=RelinquishVirtualMemory(vertices_info); return((PointInfo *) NULL); } vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info); monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info); image_view=AcquireVirtualCacheView(image,exception); background=GetEdgeBackgroundColor(image,image_view,exception); status=MagickTrue; n=0; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) { vertices[n].x=(double) x; vertices[n].y=(double) y; n++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Return the convex hull of the image foreground object(s). */ TraceConvexHull(vertices,n,&monotone_chain,number_vertices); convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*convex_hull)); if (convex_hull != (PointInfo *) NULL) for (n=0; n < *number_vertices; n++) convex_hull[n]=(*monotone_chain[n]); monotone_info=RelinquishVirtualMemory(monotone_info); vertices_info=RelinquishVirtualMemory(vertices_info); return(convex_hull); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M i n i m u m B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMinimumBoundingBox() returns the points that form the minimum % bounding box around the image foreground objects with the "Rotating % Calipers" algorithm. 
The method also returns these properties: % minimum-bounding-box:area, minimum-bounding-box:width, % minimum-bounding-box:height, and minimum-bounding-box:angle. % % The format of the GetImageMinimumBoundingBox method is: % % PointInfo *GetImageMinimumBoundingBox(Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the bounding box. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CaliperInfo { double area, width, height, projection; ssize_t p, q, v; } CaliperInfo; static inline double getAngle(PointInfo *p,PointInfo *q) { /* Get the angle between line (p,q) and horizontal axis, in degrees. */ return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x))); } static inline double getDistance(PointInfo *p,PointInfo *q) { double distance; distance=hypot(p->x-q->x,p->y-q->y); return(distance*distance); } static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Projection of vector (x,y) - p into a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance); } static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Distance from a point (x,y) to a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance); } MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image, size_t *number_vertices,ExceptionInfo *exception) { CaliperInfo caliper_info; const char *artifact; double angle, diameter, distance; PointInfo *bounding_box, *vertices; ssize_t i; size_t number_hull_vertices; /* Generate the minimum bounding box with the "Rotating Calipers" algorithm. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices=GetImageConvexHull(image,&number_hull_vertices,exception); if (vertices == (PointInfo *) NULL) return((PointInfo *) NULL); *number_vertices=4; bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*bounding_box)); if (bounding_box == (PointInfo *) NULL) { vertices=(PointInfo *) RelinquishMagickMemory(vertices); return((PointInfo *) NULL); } caliper_info.area=2.0*image->columns*image->rows; caliper_info.width=(double) image->columns+image->rows; caliper_info.height=0.0; caliper_info.projection=0.0; caliper_info.p=(-1); caliper_info.q=(-1); caliper_info.v=(-1); for (i=0; i < (ssize_t) number_hull_vertices; i++) { double area = 0.0, max_projection = 0.0, min_diameter = -1.0, min_projection = 0.0; ssize_t j, k; ssize_t p = -1, q = -1, v = -1; for (j=0; j < (ssize_t) number_hull_vertices; j++) { double diameter; diameter=fabs(getFeretDiameter(&vertices[i], &vertices[(i+1) % number_hull_vertices],&vertices[j])); if (min_diameter < diameter) { min_diameter=diameter; p=i; q=(i+1) % number_hull_vertices; v=j; } } for (k=0; k < (ssize_t) number_hull_vertices; k++) { double projection; /* Rotating calipers. 
*/ projection=getProjection(&vertices[p],&vertices[q],&vertices[k]); min_projection=MagickMin(min_projection,projection); max_projection=MagickMax(max_projection,projection); } area=min_diameter*(max_projection-min_projection); if (caliper_info.area > area) { caliper_info.area=area; caliper_info.width=min_diameter; caliper_info.height=max_projection-min_projection; caliper_info.projection=max_projection; caliper_info.p=p; caliper_info.q=q; caliper_info.v=v; } } /* Initialize minimum bounding box. */ diameter=getFeretDiameter(&vertices[caliper_info.p], &vertices[caliper_info.q],&vertices[caliper_info.v]); angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y, vertices[caliper_info.q].x-vertices[caliper_info.p].x); bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)* caliper_info.projection; bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)* caliper_info.projection; bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+ 0.5); bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+ 0.5); bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+ 0.5); bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+ 0.5); /* Export minimum bounding box properties. */ (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g", GetMagickPrecision(),caliper_info.area); (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g", GetMagickPrecision(),caliper_info.width); (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g", GetMagickPrecision(),caliper_info.height); (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.p].x, GetMagickPrecision(),vertices[caliper_info.p].y); (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.q].x, GetMagickPrecision(),vertices[caliper_info.q].y); (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.v].x, GetMagickPrecision(),vertices[caliper_info.v].y); /* Find smallest angle to origin. */ distance=hypot(bounding_box[0].x,bounding_box[0].y); angle=getAngle(&bounding_box[0],&bounding_box[1]); for (i=1; i < 4; i++) { double d = hypot(bounding_box[i].x,bounding_box[i].y); if (d < distance) { distance=d; angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]); } } artifact=GetImageArtifact(image,"minimum-bounding-box:orientation"); if (artifact != (const char *) NULL) { double length, q_length, p_length; PointInfo delta, point; /* Find smallest perpendicular distance from edge to origin. 
*/ point=bounding_box[0]; for (i=1; i < 4; i++) { if (bounding_box[i].x < point.x) point.x=bounding_box[i].x; if (bounding_box[i].y < point.y) point.y=bounding_box[i].y; } for (i=0; i < 4; i++) { bounding_box[i].x-=point.x; bounding_box[i].y-=point.y; } for (i=0; i < 4; i++) { double d, intercept, slope; delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x; delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y; slope=delta.y*PerceptibleReciprocal(delta.x); intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x; d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)* PerceptibleReciprocal(sqrt(slope*slope+1.0))); if ((i == 0) || (d < distance)) { distance=d; point=delta; } } angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x))); length=hypot(point.x,point.y); p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)- length); q_length=fabs(length-(double) MagickMin(caliper_info.width, caliper_info.height)); if (LocaleCompare(artifact,"landscape") == 0) { if (p_length > q_length) angle+=(angle < 0.0) ? 90.0 : -90.0; } else if (LocaleCompare(artifact,"portrait") == 0) { if (p_length < q_length) angle+=(angle >= 0.0) ? 90.0 : -90.0; } } (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g", GetMagickPrecision(),angle); (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g", GetMagickPrecision(),-angle); vertices=(PointInfo *) RelinquishMagickMemory(vertices); return(bounding_box); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, green, and blue intensities, and bi-level if the intensity is % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; const Quantum *p; ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(image,p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(image,p) == MagickFalse)) type=GrayscaleType; p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait)) type=GrayscaleAlphaType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType bilevel; ssize_t x; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); bilevel=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { bilevel=MagickFalse; break; } p+=GetPixelChannels(image); } if (bilevel == MagickFalse) break; } image_view=DestroyCacheView(image_view); return(bilevel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if the type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % It returns MagickTrue immediately if the alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const Quantum *p; ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].alpha),range),range); } } status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { Quantum *depth_map; ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=depth_map[ScaleQuantumToMap(q[i])]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType) q[i]),range),range); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type, ExceptionInfo *exception) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { status=TransformImageColorspace(image,GRAYColorspace,exception); (void) NormalizeImage(image,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleType: { status=TransformImageColorspace(image,GRAYColorspace,exception); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleAlphaType: { status=TransformImageColorspace(image,GRAYColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case PaletteType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } image->alpha_trait=UndefinedPixelTrait; break; } case PaletteBilevelAlphaType: { 
ChannelType channel_mask; status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); channel_mask=SetImageChannelMask(image,AlphaChannel); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); (void) SetImageChannelMask(image,channel_mask); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case TrueColorAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case ColorSeparationType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case ColorSeparationAlphaType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(status); image->type=type; return(MagickTrue); }
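GetImageBoundingBox() above returns a RectangleInfo whose x and y members carry the offset of the non-background region, so a caller can pass the result directly to CropImage() to realize a trim. A minimal caller sketch under that reading (illustrative, not part of attribute.c; TrimToBoundingBox is a hypothetical helper name and error handling is abbreviated):

#include <MagickCore/MagickCore.h>

/* Trim an image to its bounding box; returns a new image, or NULL if the
 * input is entirely background (the bounds collapse to zero). */
static Image *TrimToBoundingBox(const Image *image, ExceptionInfo *exception)
{
    RectangleInfo bounds = GetImageBoundingBox(image, exception);

    if ((bounds.width == 0) || (bounds.height == 0))
        return((Image *) NULL);
    /* CropImage honors bounds.x/bounds.y as the crop offset. */
    return(CropImage(image, &bounds, exception));
}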
GB_binop__first_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_int16) // A.*B function (eWiseMult): GB (_AemultB_08__first_int16) // A.*B function (eWiseMult): GB (_AemultB_02__first_int16) // A.*B function (eWiseMult): GB (_AemultB_04__first_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int16) // A*D function (colscale): GB (_AxD__first_int16) // D*A function (rowscale): GB (_DxB__first_int16) // C+=B function (dense accum): GB (_Cdense_accumB__first_int16) // C+=b function (dense accum): GB (_Cdense_accumb__first_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int16) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 1 // BinaryOp: cij = aij #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_INT16 || GxB_NO_FIRST_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__first_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
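/*
 * Hedged illustration (not generated source): for the FIRST operator the
 * GB_BINOP(z,x,y,i,j) macro above expands to "z = x", which is why B is
 * declared pattern-only (GB_B_IS_PATTERN is 1) and GB_GETB is empty.  A
 * minimal scalar model of what the eWiseMult kernels compute, assuming
 * plain dense int16_t arrays rather than GrB_Matrix objects:
 */
#if 0
#include <stdint.h>
#include <stddef.h>
static void first_int16_dense (int16_t *Cx, const int16_t *Ax,
    const int16_t *Bx, size_t n)
{
    (void) Bx ;                 /* B contributes only its pattern */
    for (size_t p = 0 ; p < n ; p++)
    {
        Cx [p] = Ax [p] ;       /* cij = aij, as in GB_BINOP */
    }
}
#endif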
opencl_pgpsda_fmt_plug.c
/* * Format for brute-forcing PGP SDAs (self-decrypting archives). * * This software is Copyright (c) 2017 Dhiru Kholia <dhiru at openwall.net> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_pgpsda; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_pgpsda); #else #include <stdint.h> #include <string.h> #include <openssl/cast.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "misc.h" #include "sha.h" #include "common-opencl.h" #include "options.h" #include "pgpsda_common.h" #define FORMAT_LABEL "pgpsda-opencl" #define ALGORITHM_NAME "SHA1 OpenCL" #define BINARY_SIZE 8 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 124 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } pgpsda_password; typedef struct { uint8_t v[16]; } pgpsda_hash; typedef struct { uint32_t iterations; uint8_t salt[8]; } pgpsda_salt; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; static struct custom_salt *cur_salt; static cl_int cl_error; static pgpsda_password *inbuffer; static pgpsda_hash *outbuffer; static pgpsda_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; size_t insize, outsize, settingsize; // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl_autotune.h" #include "memdbg.h" static const char *warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(pgpsda_password) * gws; outsize = sizeof(pgpsda_hash) * gws; settingsize = sizeof(pgpsda_salt); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); // Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (inbuffer) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char 
build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%d", PLAINTEXT_LENGTH); opencl_init("$JOHN/kernels/pgpsda_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "pgpsda", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(pgpsda_password), 0, db); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 300); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; currentsalt.iterations = cur_salt->iterations; memcpy((char*)currentsalt.salt, cur_salt->salt, 8); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); } #undef set_key static void set_key(char *key, int index) { uint32_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint32_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); // Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); // Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); // Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char *key; CAST_KEY ck; key = outbuffer[index].v; CAST_set_key(&ck, 16, key); memset((unsigned char*)crypt_out[index], 0, BINARY_SIZE); CAST_ecb_encrypt(key, (unsigned char*)crypt_out[index], &ck, CAST_ENCRYPT); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_pgpsda = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, pgpsda_tests, }, { init, done, reset, fmt_default_prepare, pgpsda_common_valid, fmt_default_split, get_binary, pgpsda_common_get_salt, { pgpsda_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
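/*
 * Hedged sketch (not part of the plugin): the host-side step in crypt_all()
 * above turns each GPU-derived 16-byte key into an 8-byte binary by
 * CAST5-ECB-encrypting the key's first block under the key itself; cmp_all()
 * and cmp_one() then compare that block against the hash parsed out of the
 * ciphertext by get_binary().  A standalone model of that step, assuming
 * OpenSSL's CAST API and a hypothetical helper name:
 */
#if 0
#include <string.h>
#include <openssl/cast.h>
static void pgpsda_binary_from_key(const unsigned char key[16],
                                   unsigned char out[8])
{
    CAST_KEY ck;
    CAST_set_key(&ck, 16, key);               /* 128-bit CAST5 key */
    memset(out, 0, 8);                        /* mirrors the memset in crypt_all() */
    /* encrypt one 8-byte block of the key itself, as crypt_all() does */
    CAST_ecb_encrypt(key, out, &ck, CAST_ENCRYPT);
}
#endif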
LAGraph_FastGraphletTransform.c
//------------------------------------------------------------------------------ // LAGraph_FastGraphletTransform: fast graphlet transform //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause // Contributed by Tanner Hoke, Texas A&M University, ... //------------------------------------------------------------------------------ // LAGraph_FastGraphletTransform: computes the Fast Graphlet Transform of // an undirected graph. No self edges are allowed on the input graph. // fixme: rename this // https://arxiv.org/pdf/2007.11111.pdf //------------------------------------------------------------------------------ #define F_UNARY(f) ((void (*)(void *, const void *)) f) #define LAGraph_FREE_WORK \ { \ GrB_free (&C_3) ; \ GrB_free (&d_0) ; \ GrB_free (&d_1) ; \ GrB_free (&d_2) ; \ GrB_free (&d_3) ; \ GrB_free (&d_4) ; \ GrB_free (&d_5) ; \ GrB_free (&d_6) ; \ GrB_free (&d_7) ; \ GrB_free (&d_8) ; \ GrB_free (&d_9) ; \ GrB_free (&d_10) ; \ GrB_free (&d_11) ; \ GrB_free (&d_12) ; \ GrB_free (&d_13) ; \ GrB_free (&d_14) ; \ GrB_free (&d_15) ; \ GrB_free (&d_2) ; \ GrB_free (&v) ; \ GrB_free (&p_1_minus_one) ; \ GrB_free (&p_1_minus_two) ; \ GrB_free (&two_c_3) ; \ GrB_free (&p_1_p_1_had) ; \ GrB_free (&C_42) ; \ GrB_free (&P_2) ; \ GrB_free (&D_1) ; \ GrB_free (&D_4c) ; \ GrB_free (&D_43) ; \ GrB_free (&U_inv) ; \ GrB_free (&F_raw) ; \ GrB_free (&C_4) ; \ } #define LAGraph_FREE_ALL \ { \ LAGraph_FREE_WORK ; \ } #define F_UNARY(f) ((void (*)(void *, const void *)) f) #include "LG_internal.h" #include "LAGraphX.h" void sub_one_mult (int64_t *z, const int64_t *x) { (*z) = (*x) * ((*x)-1) ; } int LAGraph_FastGraphletTransform ( // outputs: GrB_Matrix *F_net, // 16-by-n matrix of graphlet counts // inputs: LAGraph_Graph G, bool compute_d_15, // probably this makes most sense char *msg ) { LG_CLEAR_MSG ; GrB_Index const U_inv_I[] = {0, 1, 2, 2, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12, 13, 13, 14, 14, 15} ; GrB_Index const U_inv_J[] = {0, 1, 2, 4, 3, 4, 4, 5, 9, 10, 12, 13, 14, 15, 6, 10, 11, 12, 13, 14, 15, 7, 9, 10, 13, 14, 15, 8, 11, 14, 15, 9, 13, 15, 10, 13, 14, 15, 11, 14, 15, 12, 13, 14, 15, 13, 15, 14, 15, 15} ; int64_t const U_inv_X[] = {1, 1, 1, -2, 1, -1, 1, 1, -2, -1, -2, 4, 2, -6, 1, -1, -2, -2, 2, 4, -6, 1, -1, -1, 2, 1, -3, 1, -1, 1, -1, 1, -2, 3, 1, -2, -2, 6, 1, -2, 3, 1, -1, -1, 3, 1, -3, 1, -3, 1} ; GrB_Index const U_inv_nvals = 50; GrB_Matrix C_3 = NULL, A = NULL, C_42 = NULL, P_2 = NULL, D_1 = NULL, D_4c = NULL, D_43 = NULL, U_inv = NULL, F_raw = NULL, C_4 = NULL ; GrB_Vector d_0 = NULL, d_1 = NULL, d_2 = NULL, d_3 = NULL, d_4 = NULL, d_5 = NULL, d_6 = NULL, d_7 = NULL, d_8 = NULL, d_9 = NULL, d_10 = NULL, d_11 = NULL, d_12 = NULL, d_13 = NULL, d_14 = NULL, d_15 = NULL; GrB_Vector v = NULL, two_c_3 = NULL, p_1_minus_one = NULL, p_1_minus_two = NULL, p_1_p_1_had = NULL ; GrB_Index nvals ; int64_t ntri ; A = G->A ; GrB_Index n ; GRB_TRY (GrB_Matrix_nrows (&n, A)) ; //-------------------------------------------------------------------------- // compute d_0 = e //-------------------------------------------------------------------------- // d_0 = e GRB_TRY (GrB_Vector_new (&d_0, GrB_INT64, n)) ; GRB_TRY (GrB_assign (d_0, NULL, NULL, 1, GrB_ALL, n, NULL)) ; //-------------------------------------------------------------------------- // compute d_1 = Ae 
(in_degree) //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_1, GrB_INT64, n)) ; // d_1 = Ae (in_degree) GRB_TRY (LAGraph_Cached_OutDegree (G, msg)) ; GRB_TRY (GrB_Vector_dup (&d_1, G->out_degree)) ; //-------------------------------------------------------------------------- // compute d_2 = p_2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_2, GrB_INT64, n)) ; // d_2 = p_2 = A*p_1 - c_2 = A*d_1 - d_1 GRB_TRY (GrB_mxv (d_2, NULL, NULL, GxB_PLUS_SECOND_INT64, A, d_1, NULL)) ; GRB_TRY (GrB_eWiseMult (d_2, NULL, NULL, GrB_MINUS_INT64, d_2, d_1, NULL)) ; //-------------------------------------------------------------------------- // compute d_3 = hadamard(p_1, p_1 - 1) / 2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_3, GrB_INT64, n)) ; GrB_UnaryOp Sub_one_mult = NULL ; GRB_TRY (GrB_UnaryOp_new (&Sub_one_mult, F_UNARY (sub_one_mult), GrB_INT64, GrB_INT64)) ; GRB_TRY (GrB_apply (d_3, NULL, NULL, Sub_one_mult, d_1, NULL)) ; GRB_TRY (GrB_apply (d_3, NULL, NULL, GrB_DIV_INT64, d_3, (int64_t) 2, NULL)) ; //-------------------------------------------------------------------------- // compute d_4 = C_3e/2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (&C_3, GrB_INT64, n, n)) ; GRB_TRY (GrB_Vector_new (&d_4, GrB_INT64, n)) ; // C_3 = hadamard(A, A^2) GRB_TRY (GrB_mxm (C_3, A, NULL, GxB_PLUS_FIRST_INT64, A, A, GrB_DESC_ST1)) ; // d_4 = c_3 = C_3e/2 GRB_TRY (GrB_reduce (d_4, NULL, NULL, GrB_PLUS_MONOID_INT64, C_3, NULL)) ; GRB_TRY (GrB_apply (d_4, NULL, NULL, GrB_DIV_INT64, d_4, (int64_t) 2, NULL)) ; //-------------------------------------------------------------------------- // compute d_5 = p_3 = A*d_2 - hadamard(p_1, p_1 - 1) - 2c_3 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&v, GrB_INT64, n)) ; GRB_TRY (GrB_Vector_new (&two_c_3, GrB_INT64, n)) ; GRB_TRY (GrB_Vector_new (&d_5, GrB_INT64, n)) ; // v = hadamard(p_1, p_1 - 1) GRB_TRY (GrB_apply (v, NULL, NULL, Sub_one_mult, d_1, NULL)) ; // two_c_3 = 2 * c_3 = 2 * d_4 GRB_TRY (GrB_apply (two_c_3, NULL, NULL, GrB_TIMES_INT64, 2, d_4, NULL)) ; // d_5 = A * d_2 GRB_TRY (GrB_mxv (d_5, NULL, NULL, GxB_PLUS_SECOND_INT64, A, d_2, NULL)) ; // d_5 -= hadamard(p_1, p_1 - 1) GRB_TRY (GrB_eWiseAdd (d_5, NULL, NULL, GrB_MINUS_INT64, d_5, v, NULL)) ; // d_5 -= two_c_3 GRB_TRY (GrB_eWiseAdd (d_5, NULL, NULL, GrB_MINUS_INT64, d_5, two_c_3, NULL)) ; //-------------------------------------------------------------------------- // compute d_6 = hadamard(d_2, p_1-1) - 2c_3 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&p_1_minus_one, GrB_INT64, n)) ; GRB_TRY (GrB_Vector_new (&d_6, GrB_INT64, n)) ; // p_1_minus_one = p_1 - 1 GRB_TRY (GrB_apply (p_1_minus_one, NULL, NULL, GrB_MINUS_INT64, d_1, (int64_t) 1, NULL)) ; // d_6 = hadamard(d_2, p_1-1) GRB_TRY (GrB_eWiseMult (d_6, NULL, NULL, GrB_TIMES_INT64, d_2, p_1_minus_one, NULL)) ; // d_6 -= 2c_3 GRB_TRY (GrB_eWiseAdd (d_6, NULL, NULL, GrB_MINUS_INT64, d_6, two_c_3, NULL)) ; //-------------------------------------------------------------------------- // compute d_7 = A*hadamard(p_1-1, p_1-2) / 2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&p_1_minus_two, GrB_INT64, n)) ; GRB_TRY (GrB_Vector_new (&p_1_p_1_had, GrB_INT64, n)) ; 
GRB_TRY (GrB_Vector_new (&d_7, GrB_INT64, n)) ; GRB_TRY (GrB_apply (p_1_minus_two, NULL, NULL, GrB_MINUS_INT64, d_1, (int64_t) 2, NULL)) ; GRB_TRY (GrB_eWiseMult (p_1_p_1_had, NULL, NULL, GrB_TIMES_INT64, p_1_minus_one, p_1_minus_two, NULL)) ; GRB_TRY (GrB_mxv (d_7, NULL, NULL, GxB_PLUS_SECOND_INT64, A, p_1_p_1_had, NULL)) ; GRB_TRY (GrB_apply (d_7, NULL, NULL, GrB_DIV_INT64, d_7, (int64_t) 2, NULL)) ; //-------------------------------------------------------------------------- // compute d_8 = hadamard(p_1, p_1_p_1_had) / 6 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_8, GrB_INT64, n)) ; GRB_TRY (GrB_eWiseMult (d_8, NULL, NULL, GrB_TIMES_INT64, d_1, p_1_p_1_had, NULL)) ; GRB_TRY (GrB_apply (d_8, NULL, NULL, GrB_DIV_INT64, d_8, (int64_t) 6, NULL)) ; //-------------------------------------------------------------------------- // compute d_9 = A*c_3 - 2*c_3 //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_9, GrB_INT64, n)) ; GRB_TRY (GrB_mxv (d_9, NULL, NULL, GxB_PLUS_SECOND_INT64, A, d_4, NULL)) ; GRB_TRY (GrB_eWiseAdd (d_9, NULL, NULL, GrB_MINUS_INT64, d_9, two_c_3, NULL)) ; //-------------------------------------------------------------------------- // compute d_10 = C_3 * (p_1 - 2) //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_10, GrB_INT64, n)) ; GRB_TRY (GrB_mxv (d_10, NULL, NULL, GxB_PLUS_TIMES_INT64, C_3, p_1_minus_two, NULL)) ; //-------------------------------------------------------------------------- // compute d_11 = hadamard(p_1 - 2, c_3) //-------------------------------------------------------------------------- GRB_TRY (GrB_Vector_new (&d_11, GrB_INT64, n)) ; GRB_TRY (GrB_eWiseMult (d_11, NULL, NULL, GrB_TIMES_INT64, p_1_minus_two, d_4, NULL)) ; //-------------------------------------------------------------------------- // compute d_12 = c_4 = C_{4,2}e/2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (&C_4, GrB_INT64, n, 1)) ; GRB_TRY (GrB_Matrix_new (&D_1, GrB_INT64, n, n)) ; GRB_TRY (GrB_Vector_new (&d_12, GrB_INT64, n)) ; // D_1 = diag(d_1) GRB_TRY (GxB_Matrix_diag (D_1, d_1, (int64_t) 0, NULL)) ; GRB_TRY (GrB_Matrix_nvals (&nvals, A)); const GrB_Index entries_per_tile = 1000; GrB_Index ntiles = (nvals + entries_per_tile - 1) / entries_per_tile ; GrB_Matrix A_Tiles [ntiles], D_Tiles [ntiles], C_Tiles [ntiles] ; GrB_Index Tile_nrows [ntiles] ; GrB_Index Tile_ncols [1] = {n} ; int64_t tot_deg = 0 ; int tile_cnt = 0 ; GrB_Index last_row = -1 ; for (GrB_Index i = 0; i < n; ++i) { int64_t deg ; GRB_TRY (GrB_Vector_extractElement (&deg, d_1, i)) ; if (i == n - 1 || (tot_deg / entries_per_tile != (tot_deg + deg) / entries_per_tile)) { Tile_nrows [tile_cnt++] = i - last_row ; last_row = i ; } tot_deg += deg ; } GRB_TRY (GxB_Matrix_split (A_Tiles, tile_cnt, 1, Tile_nrows, Tile_ncols, A, NULL)) ; GRB_TRY (GxB_Matrix_split (D_Tiles, tile_cnt, 1, Tile_nrows, Tile_ncols, D_1, NULL)) ; for (int i = 0; i < tile_cnt; ++i) C_Tiles [i] = NULL ; #define TRY(method) \ { \ GrB_Info info = method ; \ if (info != GrB_SUCCESS) \ { \ GrB_free (&A_i) ; \ GrB_free (&C_Tiles [i]) ; \ GrB_free (&e) ; \ continue ; \ } \ } GxB_set (GxB_NTHREADS, 1) ; #pragma omp parallel for num_threads(omp_get_max_threads()) schedule(dynamic,1) for (int i = 0; i < tile_cnt; ++i) { GrB_Matrix A_i = NULL, e = NULL ; TRY (GrB_Matrix_new (&e, GrB_INT64, n, 1)) ; TRY (GrB_assign (e, NULL, NULL, 
(int64_t) 1, GrB_ALL, n, GrB_ALL, 1, NULL)) ; TRY (GrB_Matrix_new (&A_i, GrB_INT64, Tile_nrows [i], n)) ; TRY (GrB_Matrix_new (&C_Tiles [i], GrB_INT64, Tile_nrows [i], 1)) ; TRY (GrB_mxm (A_i, NULL, NULL, GxB_PLUS_PAIR_INT64, A_Tiles [i], A, NULL)) ; TRY (GrB_eWiseAdd (A_i, NULL, NULL, GrB_MINUS_INT64, A_i, D_Tiles [i], NULL)) ; TRY (GrB_apply (A_i, NULL, NULL, Sub_one_mult, A_i, NULL)) ; // multiply A_i by it on the right TRY (GrB_mxm (C_Tiles [i], NULL, NULL, GxB_PLUS_FIRST_INT64, A_i, e, NULL)) ; GrB_free (&A_i) ; GrB_free (&e) ; } GxB_set (GxB_NTHREADS, omp_get_max_threads()) ; GRB_TRY (GxB_Matrix_concat (C_4, C_Tiles, tile_cnt, 1, NULL)) ; // d_12 = C_4 GRB_TRY (GrB_reduce (d_12, NULL, NULL, GrB_PLUS_MONOID_INT64, C_4, NULL)) ; GRB_TRY (GrB_apply (d_12, NULL, NULL, GrB_DIV_INT64, d_12, 2, NULL)) ; //-------------------------------------------------------------------------- // compute d_13 = D_{4,c}e/2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (&D_4c, GrB_INT64, n, n)) ; GRB_TRY (GrB_Vector_new (&d_13, GrB_INT64, n)) ; GRB_TRY (GrB_eWiseMult (D_4c, NULL, NULL, GrB_MINUS_INT64, C_3, A, NULL)) ; // can be mult because we mask with A next GRB_TRY (GrB_mxm (D_4c, A, NULL, GxB_PLUS_SECOND_INT64, A, D_4c, GrB_DESC_S)) ; // d_13 = D_{4,c}*e/2 GRB_TRY (GrB_reduce (d_13, NULL, NULL, GrB_PLUS_INT64, D_4c, NULL)) ; GRB_TRY (GrB_apply (d_13, NULL, NULL, GrB_DIV_INT64, d_13, (int64_t) 2, NULL)) ; //-------------------------------------------------------------------------- // compute d_14 = D_{4,3}e/2 = hadamard(A, C_42)e/2 //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (&D_43, GrB_INT64, n, n)) ; GRB_TRY (GrB_Vector_new (&d_14, GrB_INT64, n)) ; GRB_TRY (GrB_Matrix_new (&C_42, GrB_INT64, n, n)) ; GRB_TRY (GrB_Matrix_new (&P_2, GrB_INT64, n, n)) ; // P_2 = A*A - diag(d_1) GRB_TRY (GrB_eWiseAdd (P_2, A, NULL, GrB_MINUS_INT64, C_3, D_1, NULL)) ; // C_42 = hadamard(P_2, P_2 - 1) GRB_TRY (GrB_apply (C_42, A, NULL, Sub_one_mult, P_2, NULL)) ; GRB_TRY (GrB_eWiseMult (D_43, NULL, NULL, GrB_TIMES_INT64, A, C_42, NULL)) ; // d_14 = D_{4,3}*e/2 GRB_TRY (GrB_reduce (d_14, NULL, NULL, GrB_PLUS_INT64, D_43, NULL)) ; GRB_TRY (GrB_apply (d_14, NULL, NULL, GrB_DIV_INT64, d_14, (int64_t) 2, NULL)) ; //-------------------------------------------------------------------------- // compute d_15 = Te/6 //-------------------------------------------------------------------------- if (compute_d_15) { LAGRAPH_TRY (LAGraph_KTruss (&A, G, 4, msg)) ; GRB_TRY (GrB_Vector_new (&d_15, GrB_INT64, n)) ; //GrB_wait (A, GrB_MATERIALIZE) ; // this is essential int nthreads = 1 ; // todo: parallelize this... //#pragma omp parallel for num_threads(nthreads) //for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index *neighbors = (GrB_Index*) malloc(n * sizeof(GrB_Index)); GrB_Index *k4cmn = (GrB_Index*) malloc(n * sizeof(GrB_Index)); int64_t *f15 = (int64_t*) malloc(n * sizeof(int64_t)); GrB_Index *I = (int64_t*) malloc(n * sizeof(GrB_Index)); int *isNeighbor = (int*) malloc(n * sizeof(int)); for (int i = 0; i < n; ++i) { neighbors [i] = k4cmn [i] = f15 [i] = isNeighbor [i] = 0 ; I [i] = i ; } // thread tid operates on A(row1:row2-1,:) GrB_Index row1 = 0;//tid * (n / nthreads) ; GrB_Index row2 = n;//(tid == nthreads - 1) ? 
n : ((tid+1) * (n / nthreads)) ; GxB_Iterator riterator ; GxB_Iterator_new (&riterator) ; GrB_Info info = GxB_rowIterator_attach (riterator, A, NULL) ; if (info < 0) { LAGraph_FREE_ALL ; return info ; } GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; info = GxB_rowIterator_attach (iterator, A, NULL) ; if (info < 0) { LAGraph_FREE_ALL ; return info ; } // seek to A(row1,:) info = GxB_rowIterator_seekRow (iterator, row1) ; while (info != GxB_EXHAUSTED) { // iterate over entries in A(i,:) GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ; if (i >= row2) break ; int neighbor_cnt = 0 ; while (info == GrB_SUCCESS) { // working with edge (i, j) GrB_Index j = GxB_rowIterator_getColIndex (iterator) ; if (j > i) { neighbors [neighbor_cnt++] = j ; isNeighbor [j] = 1 ; } info = GxB_rowIterator_nextCol (iterator) ; } for (int neighbor_id = 0 ; neighbor_id < neighbor_cnt ; ++neighbor_id) { GrB_Index j = neighbors [neighbor_id] ; int cmn_cnt = 0 ; info = GxB_rowIterator_seekRow(riterator, j) ; while (info == GrB_SUCCESS) { // iterate over neighbors of j GrB_Index k = GxB_rowIterator_getColIndex (riterator) ; if (k > j && isNeighbor [k]) { k4cmn [cmn_cnt++] = k ; isNeighbor [k] = -1 ; } info = GxB_rowIterator_nextCol (riterator) ; } // check every combination for (int k_1 = 0 ; k_1 < cmn_cnt ; k_1++) { GrB_Index k = k4cmn [k_1] ; info = GxB_rowIterator_seekRow(riterator, k) ; while (info == GrB_SUCCESS) { // iterate over neighbors of k GrB_Index l = GxB_rowIterator_getColIndex (riterator) ; if (l > k && isNeighbor [l] == -1) { f15[i]++ ; f15[j]++ ; f15[k]++ ; f15[l]++ ; } info = GxB_rowIterator_nextCol (riterator) ; } } for (int k_1 = 0 ; k_1 < cmn_cnt ; k_1++) { isNeighbor[k4cmn[k_1]] = 1 ; } } for (int neighbor_id = 0 ; neighbor_id < neighbor_cnt ; ++neighbor_id) { GrB_Index j = neighbors [neighbor_id] ; isNeighbor [j] = 0 ; } // move to the next row, A(i+1,:) info = GxB_rowIterator_nextRow (iterator) ; } GrB_free (&iterator) ; GrB_free (&riterator) ; GRB_TRY (GrB_Vector_build (d_15, I, f15, n, NULL)) ; free (neighbors) ; free (k4cmn) ; free (f15) ; free (I) ; free (isNeighbor) ; } } //-------------------------------------------------------------------------- // construct raw frequencies matrix F_raw //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (&F_raw, GrB_INT64, 16, n)) ; GrB_Vector d[16] = {d_0, d_1, d_2, d_3, d_4, d_5, d_6, d_7, d_8, d_9, d_10, d_11, d_12, d_13, d_14, d_15} ; for (int i = 0; i < 15 + (compute_d_15 ? 
1 : 0); ++i) { GRB_TRY (GrB_Vector_nvals (&nvals, d[i])); GrB_Index *J = (GrB_Index*) malloc (nvals*sizeof(GrB_Index)) ; int64_t *vals = (int64_t*) malloc (nvals*sizeof(int64_t)) ; GRB_TRY (GrB_Vector_extractTuples (J, vals, &nvals, d[i])) ; for (int j = 0; j < nvals; ++j) { GRB_TRY (GrB_Matrix_setElement (F_raw, vals[j], i, J[j])) ; } free (J) ; free (vals) ; } //-------------------------------------------------------------------------- // construct U_inv //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (&U_inv, GrB_INT64, 16, 16)) ; GRB_TRY (GrB_Matrix_build (U_inv, U_inv_I, U_inv_J, U_inv_X, U_inv_nvals, GrB_PLUS_INT64)) ; //GRB_TRY (GxB_print (U_inv, 3)) ; //-------------------------------------------------------------------------- // construct net frequencies matrix F_net //-------------------------------------------------------------------------- GRB_TRY (GrB_Matrix_new (F_net, GrB_INT64, 16, n)) ; GRB_TRY (GrB_mxm (*F_net, NULL, NULL, GxB_PLUS_TIMES_INT64, U_inv, F_raw, NULL)) ; GrB_Vector f_net = NULL ; GRB_TRY (GrB_Vector_new (&f_net, GrB_INT64, 16)) ; GRB_TRY (GrB_reduce (f_net, NULL, NULL, GrB_PLUS_INT64, *F_net, NULL)) ; GRB_TRY (GxB_print (f_net, 3)) ; GRB_TRY (GrB_free (&f_net)) ; //GRB_TRY (GxB_print (*F_net, 3)) ; //-------------------------------------------------------------------------- // free work //-------------------------------------------------------------------------- LAGraph_FREE_WORK ; return (0) ; }
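/*
 * Hedged scalar model (not LAGraph code): several of the raw graphlet
 * frequencies above depend only on the degree vector p_1 = d_1.  For
 * example d_3 = hadamard(p_1, p_1 - 1)/2, computed above with the
 * sub_one_mult unary op followed by an integer division by 2.  A plain-C
 * equivalent for a single vertex of degree deg:
 */
#if 0
#include <stdint.h>
static int64_t d3_from_degree (int64_t deg)
{
    /* same arithmetic as GrB_apply(Sub_one_mult) then GrB_DIV_INT64 by 2 */
    return (deg * (deg - 1)) / 2 ;
}
#endif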
GB_binop__minus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp32) // A*D function (colscale): GB (_AxD__minus_fp32) // D*A function (rowscale): GB (_DxB__minus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp32) // C=scalar+B GB (_bind1st__minus_fp32) // C=scalar+B' GB (_bind1st_tran__minus_fp32) // C=A+scalar GB (_bind2nd__minus_fp32) // C=A'+scalar GB (_bind2nd_tran__minus_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c 
uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
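/*
 * Hedged illustration (not generated source): for MINUS_FP32 the GB_BINOP
 * macro above is "z = (x - y)", and the bind1st/bind2nd kernels simply fix
 * one operand to a scalar.  A dense scalar model of _bind2nd__minus_fp32,
 * without the bitmap (Ab) test or OpenMP scheduling:
 */
#if 0
#include <stddef.h>
static void bind2nd_minus_fp32 (float *Cx, const float *Ax, float y, size_t n)
{
    for (size_t p = 0 ; p < n ; p++)
    {
        Cx [p] = Ax [p] - y ;   /* cij = aij - y, as in GB_BINOP */
    }
}
#endif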
gemv_x_csr.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef S #include <immintrin.h> #endif #ifdef _OPENMP #include <omp.h> #endif #if defined (__AVX512__) && defined(S) static float gemv_csr_kernel_doti_simd512_unroll4(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){ ALPHA_INT ns64 = (ns >> 6) << 6; __m512 tmp0,tmp1,tmp2,tmp3; __m512 vy0,vy1,vy2,vy3; __m512 vx0,vx1,vx2,vx3; __m512i vindex0,vindex1,vindex2,vindex3; float tmp = 0; tmp0 = _mm512_setzero(); tmp1 = _mm512_setzero(); tmp2 = _mm512_setzero(); tmp3 = _mm512_setzero(); ALPHA_INT i; for(i = 0; i < ns64;i+=64){ vx0 = _mm512_loadu_ps(&x[i]); vx1 = _mm512_loadu_ps(&x[i+16]); vx2 = _mm512_loadu_ps(&x[i+32]); vx3 = _mm512_loadu_ps(&x[i+48]); vindex0 = _mm512_loadu_si512((void *)&indx[i]); vindex1 = _mm512_loadu_si512((void *)&indx[i+16]); vindex2 = _mm512_loadu_si512((void *)&indx[i+32]); vindex3 = _mm512_loadu_si512((void *)&indx[i+48]); vy0 = _mm512_i32gather_ps(vindex0,y,4); vy1 = _mm512_i32gather_ps(vindex1,y,4); vy2 = _mm512_i32gather_ps(vindex2,y,4); vy3 = _mm512_i32gather_ps(vindex3,y,4); tmp0 = _mm512_fmadd_ps(vx0,vy0,tmp0); tmp1 = _mm512_fmadd_ps(vx1,vy1,tmp1); tmp2 = _mm512_fmadd_ps(vx2,vy2,tmp2); tmp3 = _mm512_fmadd_ps(vx3,vy3,tmp3); } for(; i< ns;++i){ tmp += x[i] * y[indx[i]]; } tmp += _mm512_reduce_add_ps(tmp0) + _mm512_reduce_add_ps(tmp1) + _mm512_reduce_add_ps(tmp2) + _mm512_reduce_add_ps(tmp3); return tmp; } #endif // //static float gemv_s_csr_kernel_doti_simd512(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){ // ALPHA_INT ns16 = (ns >> 4) << 4; // __m512 tmp,vy,vx; // __m512i vindex; // float tmp0 = 0; // tmp = _mm512_setzero(); // ALPHA_INT i; // for(i = 0; i < ns16;i+=16){ // vx = _mm512_loadu_ps(&x[i]); // vindex = _mm512_loadu_epi32(&indx[i]); // vy = _mm512_i32gather_ps(vindex,y,4); // tmp = _mm512_fmadd_ps(vx,vy,tmp); // } // for(; i< ns;++i){ // tmp0 += x[i] * y[indx[i]]; // } // tmp0 += _mm512_reduce_add_ps(tmp); // return tmp0; //} // static float gemv_s_csr_kernel_doti_unroll4(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){ // ALPHA_INT ns4 = ns & ~3; // ALPHA_INT i; // float tmp0 = 0.f; // float tmp1 = 0.f; // float tmp2 = 0.f; // float tmp3 = 0.f; // for(i = 0; i < ns4;i+=4){ // tmp0 += x[i] * y[indx[i]]; // tmp1 += x[i+1] * y[indx[i+1]]; // tmp2 += x[i+2] * y[indx[i+2]]; // tmp3 += x[i+3] * y[indx[i+3]]; // } // for(; i< ns;++i){ // tmp0 += x[i] * y[indx[i]]; // } // return ((tmp0 + tmp1) + (tmp2 + tmp3)); // } // float gemv_s_csr_kernel_doti(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){ // float tmp0 = 0.f; // for(ALPHA_INT i = 0; i< ns;++i){ // tmp0 += x[i] * y[indx[i]]; // } // return tmp0; // } // alphasparse_status_t gemv_s_csr(const float alpha,const spmat_csr_s_t* A,const float* x,const float beta,float* y) // { // ALPHA_INT m = A->rows; // ALPHA_INT num_threads = alpha_get_thread_num(); // ALPHA_INT partition[num_threads + 1]; // balanced_partition_row_by_nnz(A->rows_end, m, num_threads, partition); // #ifdef _OPENMP // #pragma omp parallel num_threads(num_threads) // #endif // { // ALPHA_INT tid = alpha_get_thread_id(); // ALPHA_INT local_m_s = partition[tid]; // ALPHA_INT local_m_e = partition[tid + 1]; // for (ALPHA_INT i = local_m_s; i < local_m_e; i++) // { // y[i] *= beta; // ALPHA_INT pks = A->rows_start[i]; // ALPHA_INT pke = A->rows_end[i]; // ALPHA_INT pkl = pke - pks; // // float tmp = gemv_s_csr_kernel_doti(pkl,&A->values[pks],&A->col_indx[pks],x); 
// //float tmp = gemv_s_csr_kernel_doti_unroll4(pkl,&A->values[pks],&A->col_indx[pks],x); // //float tmp = gemv_s_csr_kernel_doti_simd512(pkl,&A->values[pks],&A->col_indx[pks],x); // float tmp = gemv_s_csr_kernel_doti_simd512_unroll4(pkl,&A->values[pks],&A->col_indx[pks],x); // y[i] += alpha * tmp; // } // } // return ALPHA_SPARSE_STATUS_SUCCESS; // } static ALPHA_Number gemv_kernel_doti_unroll4(const ALPHA_INT ns, const ALPHA_Number *x, const ALPHA_INT *indx, const ALPHA_Number *y) { ALPHA_INT ns4 = ((ns >> 2) << 2); ALPHA_INT i; ALPHA_Number tmp0, tmp1, tmp2, tmp3; alpha_setzero(tmp0); alpha_setzero(tmp1); alpha_setzero(tmp2); alpha_setzero(tmp3); for (i = 0; i < ns4; i += 4) { alpha_madde(tmp0, x[i], y[indx[i]]); alpha_madde(tmp1, x[i + 1], y[indx[i + 1]]); alpha_madde(tmp2, x[i + 2], y[indx[i + 2]]); alpha_madde(tmp3, x[i + 3], y[indx[i + 3]]); } for (; i < ns; ++i) { alpha_madde(tmp0, x[i], y[indx[i]]); } alpha_adde(tmp0, tmp1); alpha_adde(tmp2, tmp3); alpha_adde(tmp0, tmp2); return tmp0; } static alphasparse_status_t gemv_csr_unroll4(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y, ALPHA_INT lrs, ALPHA_INT lre) { for (ALPHA_INT i = lrs; i < lre; i++) { ALPHA_INT pks = A->rows_start[i]; ALPHA_INT pke = A->rows_end[i]; ALPHA_INT pkl = pke - pks; #if defined (__AVX512__) && defined(S) float tmp = gemv_csr_kernel_doti_simd512_unroll4(pkl,&A->values[pks],&A->col_indx[pks],x); #else ALPHA_Number tmp = gemv_kernel_doti_unroll4(pkl, &A->values[pks], &A->col_indx[pks], x); #endif // #else // ALPHA_Number tmp = gemv_kernel_doti_unroll4(pkl, &A->values[pks], &A->col_indx[pks], x); // #endif alpha_mule(y[i], beta); alpha_madde(y[i], alpha, tmp); } return ALPHA_SPARSE_STATUS_SUCCESS; } static alphasparse_status_t gemv_csr_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { ALPHA_INT m = A->rows; ALPHA_INT num_threads = alpha_get_thread_num(); ALPHA_INT partition[num_threads + 1]; balanced_partition_row_by_nnz(A->rows_end, m, num_threads, partition); #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_INT local_m_s = partition[tid]; ALPHA_INT local_m_e = partition[tid + 1]; gemv_csr_unroll4(alpha, A, x, beta, y, local_m_s, local_m_e); } return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { return gemv_csr_omp(alpha, mat, x, beta, y); }
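/*
 * Hedged reference kernel (not alphasparse code): every variant above,
 * scalar, unrolled, and AVX-512, computes y[i] = beta*y[i] + alpha *
 * dot(A(i,:), x) over the CSR row slice [rows_start[i], rows_end[i]).
 * A plain float version with hypothetical raw-array arguments in place
 * of the spmat handle, useful as a correctness oracle for the kernels:
 */
#if 0
static void csr_gemv_ref(int m, const int *rows_start, const int *rows_end,
                         const int *col_indx, const float *values,
                         float alpha, const float *x, float beta, float *y)
{
    for (int i = 0; i < m; i++) {
        float tmp = 0.f;
        for (int p = rows_start[i]; p < rows_end[i]; p++)
            tmp += values[p] * x[col_indx[p]];   /* doti over row i */
        y[i] = beta * y[i] + alpha * tmp;        /* same order as gemv_csr_unroll4 */
    }
}
#endif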
omp_dsyr2k_batch.c
/**
 * @file omp_dsyr2k_batch.c
 *
 * @brief BBLAS omp_dsyr2k_batch double routine.
 *
 * BBLAS is a software package provided by Univ. of Manchester,
 * Univ. of Tennessee.
 *
 * @version 1.0.0
 * @author Samuel D. Relton
 * @author Pedro V. Lara
 * @author Mawussi Zounon
 * @date 2016-02-20
 *
 **/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
 * Code generation
 * @generated from ./bblas_omp/omp_zsyr2k_batch.c normal z -> d, Mon Jun 6 09:44:14 2016
 **/
#endif

#include <cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>

#define REAL

/**
    Purpose
    -------
    <b>dsyr2k_batch</b> is a batch version of dsyr2k.
    It performs one of the matrix-matrix operations

    arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i],

    or

    arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i],

    where alpha[i] and beta[i] are scalars, arrayC[i] is an N[i] by N[i]
    symmetric matrix, and arrayA[i] and arrayB[i] are N[i] by K[i] matrices
    in the first case and K[i] by N[i] matrices in the second case.

    Fixed and Variable Batch Operations
    -----------------------------------
    Two types of batch operation are supported depending upon the value of
    batch_opts.

    When <tt>batch_opts = BBLAS_VARIABLE</tt>
    - all parameters that are arrays must have length at least batch_count.
    - all parameters that are arrays must have all values set.

    When <tt>batch_opts = BBLAS_FIXED</tt>
    - all parameters that are arrays (except for arrayA, arrayB, arrayC,
      and info) must have length at least one.
    - all parameters that are arrays (except for arrayA, arrayB, arrayC,
      and info) need only have their first value set.

    This means that for a <tt>BBLAS_FIXED</tt> batch, the values of uplo[0],
    trans[0], N[0], K[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are
    used for all computations.

    Parameters
    ----------
    @param[in] uplo
            Array of <tt>enum BBLAS_UPLO</tt>.
            On entry, uplo[i] specifies whether the upper or lower triangular
            part of the matrix arrayC[i] is to be referenced as follows:
            - = 'BblasUpper' Only the upper triangular part of the matrix is
                to be referenced.
            - = 'BblasLower' Only the lower triangular part of the matrix is
                to be referenced.

    @param[in] trans
            Array of <tt>enum BBLAS_TRANS</tt>.
            On entry, trans[i] specifies the operation to be performed as
            follows:
            - = 'BblasNoTrans'
              arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i]
            - = 'BblasTrans'
              arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i].

    @param[in] N
            Array of <tt>int</tt>.
            Each element N[i] specifies the number of rows and columns of the
            matrix arrayC[i]. N[i] must be at least zero.

    @param[in] K
            Array of <tt>int</tt>.
            On entry with trans[i] = 'BblasNoTrans', K[i] specifies the number
            of columns of the matrices arrayA[i] and arrayB[i], and on entry
            with trans[i] = 'BblasTrans', K[i] specifies the number of rows of
            the matrices arrayA[i] and arrayB[i]. K[i] must be at least zero.

    @param[in] alpha
            Array of <tt>double</tt>.

    @param[in] arrayA
            Array of pointers.
            Each element arrayA[i] is a pointer to a DOUBLE PRECISION matrix
            of dimension lda[i] by Ka[i], where Ka[i] = K[i] when
            trans[i] = BblasNoTrans and is N[i] otherwise.
            Before entry with trans[i] = BblasNoTrans, the leading N[i] by K[i]
            part of arrayA[i] must contain the elements of arrayA[i], otherwise
            the leading K[i] by N[i] part of arrayA[i] must contain the
            elements of arrayA[i].

    @param[in] lda
            Array of <tt>int</tt>.
            On entry, lda[i] specifies the first dimension of arrayA[i] as
            declared in the calling (sub) program. When trans[i] = BblasNoTrans
            then lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must
            be at least max( 1, K[i] ).

    @param[in] arrayB
            Array of pointers.
            Each element arrayB[i] is a pointer to a DOUBLE PRECISION matrix
            of dimension ldb[i] by Ka[i], where Ka[i] = K[i] when
            trans[i] = BblasNoTrans and is N[i] otherwise.
            Before entry with trans[i] = BblasNoTrans, the leading N[i] by K[i]
            part of arrayB[i] must contain the elements of arrayB[i], otherwise
            the leading K[i] by N[i] part of arrayB[i] must contain the
            elements of arrayB[i].

    @param[in] ldb
            Array of <tt>int</tt>.
            On entry, ldb[i] specifies the first dimension of arrayB[i] as
            declared in the calling (sub) program. When trans[i] = BblasNoTrans
            then ldb[i] must be at least max( 1, N[i] ), otherwise ldb[i] must
            be at least max( 1, K[i] ).

    @param[in] beta
            Array of <tt>double</tt>.
            When beta[i] is set to zero, arrayC[i] need not be set on input.

    @param[in,out] arrayC
            Array of pointers.
            Each element arrayC[i] is a pointer to a DOUBLE PRECISION matrix
            of dimension ldc[i] by N[i].
            Before entry with uplo[i] = 'BblasUpper', the leading N[i] by N[i]
            upper triangular part of arrayC[i] must contain the upper
            triangular part of the symmetric matrix, and the strictly lower
            triangular part of arrayC[i] is not referenced. On exit, the upper
            triangular part of arrayC[i] is overwritten by the upper
            triangular part of the updated matrix.
            Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i]
            lower triangular part of arrayC[i] must contain the lower
            triangular part of the symmetric matrix, and the strictly upper
            triangular part of arrayC[i] is not referenced. On exit, the lower
            triangular part of arrayC[i] is overwritten by the lower
            triangular part of the updated matrix.

    @param[in] ldc
            Array of <tt>int</tt>.
            On entry, ldc[i] specifies the first dimension of arrayC[i] as
            declared in the calling (sub) program. Each element ldc[i] must be
            at least max( 1, N[i] ).

    @param[in] batch_count
            <tt>int</tt>
            The number of matrices to operate on.

    @param[in] batch_opts
            <tt>enum BBLAS_OPTS</tt>
            One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
            batch operation required.

    @param[out] info
            Array of <tt>int</tt>.
            Each element info[i] is the error return code of the ith dsyr2k in
            the batch; these need not be set on entry. The error codes can be
            found in bblas_macros.h.
 **/
void omp_dsyr2k_batch(
    const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
    const int *N, const int *K,
    const double *alpha, const double **arrayA, const int *lda,
    const double **arrayB, const int *ldb,
    const double *beta, double **arrayC, const int *ldc,
    const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA, LDB;
    char func_name[15] = "dsyr2k_batch";

    /* Check input arguments */
    if (batch_count < 0) {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED) {
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower)) {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if ((trans[first_index] != BblasNoTrans) &&
            (trans[first_index] != BblasTrans) &&
            (trans[first_index] != BblasConjTrans)) {
            xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_TRANS;
            }
            return;
        }
        if (N[first_index] < 0) {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        if (K[first_index] < 0) {
            xerbla_batch(func_name, BBLAS_ERR_K, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_K;
            }
            return;
        }
        if (trans[first_index] == BblasNoTrans) {
            LDA = N[first_index];
            LDB = N[first_index];
        } else {
            LDA = K[first_index];
            LDB = K[first_index];
        }
        if (lda[first_index] < max(1, LDA)) {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, LDB)) {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, N[first_index])) {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* Quick return: nothing to compute when the matrices are empty, or
         * when alpha == 0 and beta == 1 simultaneously. */
        if (N[first_index] == 0 || K[first_index] == 0 ||
            ((alpha[first_index] == (double)0.0) &&
             (beta[first_index] == (double)1.0))) {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
#pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
            /* Call to cblas_dsyr2k */
            cblas_dsyr2k(
                BblasColMajor,
                uplo[first_index], trans[first_index],
                N[first_index], K[first_index],
                (alpha[first_index]),
                arrayA[batch_iter], lda[first_index],
                arrayB[batch_iter], ldb[first_index],
                (beta[first_index]),
                arrayC[batch_iter], ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    } else if (batch_opts == BBLAS_VARIABLE) {
#pragma omp parallel for private(batch_iter, LDA, LDB)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++) {
            /* Check input arguments */
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower)) {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if ((trans[batch_iter] != BblasNoTrans) &&
                (trans[batch_iter] != BblasTrans) &&
                (trans[batch_iter] != BblasConjTrans)) {
                xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
                info[batch_iter] = BBLAS_ERR_TRANS;
                continue;
            }
            if (N[batch_iter] < 0) {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (K[batch_iter] < 0) {
                xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
                info[batch_iter] = BBLAS_ERR_K;
                continue;
            }
            if (trans[batch_iter] == BblasNoTrans) {
                LDA = N[batch_iter];
                LDB = N[batch_iter];
            } else {
                LDA = K[batch_iter];
                LDB = K[batch_iter];
            }
            if (lda[batch_iter] < max(1, LDA)) {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter] < max(1, LDB)) {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter] < max(1, N[batch_iter])) {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* Quick return */
            if (N[batch_iter] == 0 || K[batch_iter] == 0 ||
                ((alpha[batch_iter] == (double)0.0) &&
                 (beta[batch_iter] == (double)1.0))) {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_dsyr2k(
                BblasColMajor,
                uplo[batch_iter], trans[batch_iter],
                N[batch_iter], K[batch_iter],
                (alpha[batch_iter]),
                arrayA[batch_iter], lda[batch_iter],
                arrayB[batch_iter], ldb[batch_iter],
                (beta[batch_iter]),
                arrayC[batch_iter], ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    } else {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
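/*
 * Illustrative only: a minimal BBLAS_FIXED call sketch (the wrapper below is
 * hypothetical, and frees/error handling are omitted). With BBLAS_FIXED only
 * element 0 of uplo/trans/N/K/alpha/beta/lda/ldb/ldc is read, while arrayA,
 * arrayB, arrayC, and info still carry batch_count entries.
 */
#include <stdlib.h>

static void example_fixed_batch(int batch_count)
{
    enum BBLAS_UPLO  uplo[1]  = {BblasLower};
    enum BBLAS_TRANS trans[1] = {BblasNoTrans};
    int N[1] = {64}, K[1] = {32};
    int lda[1] = {64}, ldb[1] = {64}, ldc[1] = {64};
    double alpha[1] = {1.0}, beta[1] = {0.0};

    const double **arrayA = malloc(batch_count * sizeof(*arrayA));
    const double **arrayB = malloc(batch_count * sizeof(*arrayB));
    double **arrayC = malloc(batch_count * sizeof(*arrayC));
    int *info = malloc(batch_count * sizeof(*info));

    for (int i = 0; i < batch_count; i++) {
        arrayA[i] = calloc(64 * 32, sizeof(double)); /* N x K, column major */
        arrayB[i] = calloc(64 * 32, sizeof(double));
        arrayC[i] = calloc(64 * 64, sizeof(double)); /* N x N */
    }

    omp_dsyr2k_batch(uplo, trans, N, K, alpha, arrayA, lda,
                     arrayB, ldb, beta, arrayC, ldc,
                     batch_count, BBLAS_FIXED, info);
}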
Sync.c
/* Filename: Sync.c * Author: Mohammed Sourouri <mohamso@simula.no> * * Synchronous Multi-GPU code where the number of threads spawned * equals the number of GPUs. All memory transfers * are synchronous. This code corresponds to "OpenMP" results in Figure-8 in the * SC'14 paper. * * * Copyright [2014] [Mohammed Sourouri] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "Sync.h" #define DEBUG #define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) //////////////////////////////////////////////////////////////////////////////// // A method for checking error in CUDA calls //////////////////////////////////////////////////////////////////////////////// inline void __checkCuda(cudaError_t error, const char *file, const int line) { #if defined(DEBUG) || defined(_DEBUG) if (error != cudaSuccess) { printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError())); exit(-1); } #endif return; } //////////////////////////////////////////////////////////////////////////////// // Program Main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int Nx, Ny, Nz, max_iters; int blockX, blockY, blockZ; if (argc == 8) { Nx = atoi(argv[1]); Ny = atoi(argv[2]); Nz = atoi(argv[3]); max_iters = atoi(argv[4]); blockX = atoi(argv[5]); blockY = atoi(argv[6]); blockZ = atoi(argv[7]); } else { printf("Usage: %s nx ny nz i block_x block_y block_z number_of_threads\n", argv[0]); exit(1); } // Get the number of GPUS int number_of_devices; checkCuda(cudaGetDeviceCount(&number_of_devices)); if (number_of_devices < 2) { printf("Less than two devices were found.\n"); printf("Exiting...\n"); return -1; } // Decompose along the Z-axis int _Nz = Nz/number_of_devices; // Define constants const _DOUBLE_ L = 1.0; const _DOUBLE_ h = L/(Nx+1); const _DOUBLE_ dt = h*h/6.0; const _DOUBLE_ beta = dt/(h*h); const _DOUBLE_ c0 = beta; const _DOUBLE_ c1 = (1-6*beta); // Check if ECC is turned on ECCCheck(number_of_devices); // Set the number of OpenMP threads omp_set_num_threads(number_of_devices); #pragma omp parallel { unsigned int tid = omp_get_num_threads(); #pragma omp single { printf("Number of OpenMP threads: %d\n", tid); } } // CPU memory operations int dt_size = sizeof(_DOUBLE_); _DOUBLE_ *u_new, *u_old; u_new = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); u_old = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); init(u_old, u_new, h, Nx, Ny, Nz); // Allocate and generate arrays on the host size_t pitch_bytes; size_t pitch_gc_bytes; _DOUBLE_ *h_Unew, *h_Uold; _DOUBLE_ *h_s_Uolds[number_of_devices], *h_s_Unews[number_of_devices]; _DOUBLE_ *left_send_buffer[number_of_devices], *left_receive_buffer[number_of_devices]; _DOUBLE_ *right_send_buffer[number_of_devices], *right_receive_buffer[number_of_devices]; h_Unew = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); h_Uold = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2)); init(h_Uold, h_Unew, h, Nx, Ny, Nz); #pragma omp parallel { unsigned int tid = 
omp_get_thread_num();

		// Pinned (page-locked) host allocations; these are the buffers the
		// cudaMemcpy2D transfers below operate on.
		checkCuda(cudaHostAlloc((void**)&h_s_Unews[tid], dt_size*(Nx+2)*(Ny+2)*(_Nz+2), cudaHostAllocPortable));
		checkCuda(cudaHostAlloc((void**)&h_s_Uolds[tid], dt_size*(Nx+2)*(Ny+2)*(_Nz+2), cudaHostAllocPortable));
		checkCuda(cudaHostAlloc((void**)&right_send_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
		checkCuda(cudaHostAlloc((void**)&left_send_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
		checkCuda(cudaHostAlloc((void**)&right_receive_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
		checkCuda(cudaHostAlloc((void**)&left_receive_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));

		init_subdomain(h_s_Uolds[tid], h_Uold, Nx, Ny, _Nz, tid);
	}

	// GPU memory operations
	_DOUBLE_ *d_s_Unews[number_of_devices], *d_s_Uolds[number_of_devices];
	_DOUBLE_ *d_right_send_buffer[number_of_devices], *d_left_send_buffer[number_of_devices];
	_DOUBLE_ *d_right_receive_buffer[number_of_devices], *d_left_receive_buffer[number_of_devices];

	#pragma omp parallel
	{
		unsigned int tid = omp_get_thread_num();
		checkCuda(cudaSetDevice(tid));

		CopyToConstantMemory(c0, c1);

		checkCuda(cudaMallocPitch((void**)&d_s_Uolds[tid], &pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2)));
		checkCuda(cudaMallocPitch((void**)&d_s_Unews[tid], &pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2)));
		checkCuda(cudaMallocPitch((void**)&d_left_receive_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
		checkCuda(cudaMallocPitch((void**)&d_right_receive_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
		checkCuda(cudaMallocPitch((void**)&d_left_send_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
		checkCuda(cudaMallocPitch((void**)&d_right_send_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
	}

	// Copy data from host to the device
	double HtD_timer = 0.;
	HtD_timer -= omp_get_wtime();
	#pragma omp parallel
	{
		unsigned int tid = omp_get_thread_num();
		checkCuda(cudaSetDevice(tid));
		checkCuda(cudaMemcpy2D(d_s_Uolds[tid], pitch_bytes, h_s_Uolds[tid], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_Nz+2)), cudaMemcpyHostToDevice));
		checkCuda(cudaMemcpy2D(d_s_Unews[tid], pitch_bytes, h_s_Unews[tid], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_Nz+2)), cudaMemcpyHostToDevice));
	}
	HtD_timer += omp_get_wtime();

	int pitch = pitch_bytes/dt_size;
	int gc_pitch = pitch_gc_bytes/dt_size;

	// GPU kernel launch parameters
	dim3 threads_per_block(blockX, blockY, blockZ);
	unsigned int blocksInX = getBlock(Nx, blockX);
	unsigned int blocksInY = getBlock(Ny, blockY);
	unsigned int blocksInZ = getBlock(_Nz-2, k_loop);
	dim3 thread_blocks(blocksInX, blocksInY, blocksInZ);
	dim3 thread_blocks_halo(blocksInX, blocksInY);

	double compute_timer = 0.;
	compute_timer -= omp_get_wtime();

	#pragma omp parallel
	{
		int tid = omp_get_thread_num();

		for(int iterations = 0; iterations < max_iters; iterations++)
		{
			// Compute inner nodes
			checkCuda(cudaSetDevice(tid));
			ComputeInnerPoints(thread_blocks, threads_per_block,
			                   d_s_Unews[tid], d_s_Uolds[tid], pitch, Nx, Ny, _Nz);

			// Copy data to device 1-3 from 0-2
			if (tid < number_of_devices-1)
			{
				checkCuda(cudaSetDevice(tid));
				CopyBoundaryRegionToGhostCell(thread_blocks_halo, threads_per_block,
						d_s_Unews[tid], d_right_send_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 0);
				checkCuda(cudaMemcpy2D(right_send_buffer[tid], dt_size*(Nx+2), d_right_send_buffer[tid],
						pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH), cudaMemcpyDeviceToHost));
			}

			// Copy data to device 0-2 from 1-3
			if (tid > 0)
			{
				checkCuda(cudaSetDevice(tid));
				CopyBoundaryRegionToGhostCell(thread_blocks_halo, threads_per_block,
						d_s_Unews[tid], d_left_send_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 1);
				checkCuda(cudaMemcpy2D(left_send_buffer[tid], dt_size*(Nx+2), d_left_send_buffer[tid],
						pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH), cudaMemcpyDeviceToHost));
			}

			#pragma omp barrier

			// Copy right boundary data to device 1
			if (tid > 0)
			{
				checkCuda(cudaSetDevice(tid));
				checkCuda(cudaMemcpy2D(d_left_receive_buffer[tid], pitch_gc_bytes, right_send_buffer[tid-1],
						dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_GC_DEPTH)), cudaMemcpyHostToDevice));
				CopyGhostCellToBoundaryRegion(thread_blocks_halo, threads_per_block,
						d_s_Unews[tid], d_left_receive_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 1);
			}

			// Copy left boundary data to device 0
			if (tid < number_of_devices-1)
			{
				checkCuda(cudaSetDevice(tid));
				checkCuda(cudaMemcpy2D(d_right_receive_buffer[tid], pitch_gc_bytes, left_send_buffer[tid+1],
						dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_GC_DEPTH)), cudaMemcpyHostToDevice));
				CopyGhostCellToBoundaryRegion(thread_blocks_halo, threads_per_block,
						d_s_Unews[tid], d_right_receive_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 0);
			}

			// Swap pointers on the host
			#pragma omp barrier
			checkCuda(cudaSetDevice(tid));
			checkCuda(cudaDeviceSynchronize());
			swap(_DOUBLE_*, d_s_Unews[tid], d_s_Uolds[tid]);
		}
	}
	compute_timer += omp_get_wtime();

	// Copy data from device to host
	double DtH_timer = 0;
	DtH_timer -= omp_get_wtime();
	#pragma omp parallel
	{
		unsigned int tid = omp_get_thread_num();
		checkCuda(cudaSetDevice(tid));
		checkCuda(cudaMemcpy2D(h_s_Uolds[tid], dt_size*(Nx+2), d_s_Uolds[tid], pitch_bytes,
				dt_size*(Nx+2), (Ny+2)*(_Nz+2), cudaMemcpyDeviceToHost));
	}
	DtH_timer += omp_get_wtime();

	// Merge sub-domains into one big domain
	#pragma omp parallel
	{
		unsigned int tid = omp_get_thread_num();
		merge_domains(h_s_Uolds[tid], h_Uold, Nx, Ny, _Nz, tid);
	}

	// Calculate on host
#if defined(DEBUG) || defined(_DEBUG)
	cpu_heat3D(u_new, u_old, c0, c1, max_iters, Nx, Ny, Nz);
#endif

	float gflops = CalcGflops(compute_timer, max_iters, Nx, Ny, Nz);
	PrintSummary("3D Heat (7-pt)", "Plane sweeping", compute_timer, HtD_timer, DtH_timer, gflops, max_iters, Nx);

	_DOUBLE_ t = max_iters * dt;
	CalcError(h_Uold, u_old, t, h, Nx, Ny, Nz);

#if defined(DEBUG) || defined(_DEBUG)
	//exportToVTK(h_Uold, h, "heat3D.vtk", Nx, Ny, Nz);
#endif

	#pragma omp parallel
	{
		unsigned int tid = omp_get_thread_num();
		checkCuda(cudaSetDevice(tid));

		checkCuda(cudaFree(d_s_Unews[tid]));
		checkCuda(cudaFree(d_s_Uolds[tid]));
		checkCuda(cudaFree(d_right_send_buffer[tid]));
		checkCuda(cudaFree(d_left_send_buffer[tid]));
		checkCuda(cudaFree(d_right_receive_buffer[tid]));
		checkCuda(cudaFree(d_left_receive_buffer[tid]));
		checkCuda(cudaFreeHost(h_s_Unews[tid]));
		checkCuda(cudaFreeHost(h_s_Uolds[tid]));
		checkCuda(cudaFreeHost(left_send_buffer[tid]));
		checkCuda(cudaFreeHost(right_send_buffer[tid]));
		checkCuda(cudaFreeHost(left_receive_buffer[tid]));
		checkCuda(cudaFreeHost(right_receive_buffer[tid]));
		checkCuda(cudaDeviceReset());
	}

	free(u_old);
	free(u_new);

	return 0;
}
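/*
 * Sync.h is not shown in this listing. The time loop above relies on two small
 * helpers from it; plausible definitions (assumptions about the header, not
 * the original code) are sketched here.
 */

// Pointer swap used to exchange the two time levels after each iteration.
#define swap(type, a, b) do { type tmp_ = (a); (a) = (b); (b) = tmp_; } while (0)

// Thread blocks needed to cover n elements at block_dim threads per block.
static inline unsigned int getBlock(int n, int block_dim)
{
	return (unsigned int)((n + block_dim - 1) / block_dim);
}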
GB_unaryop__minv_int16_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_int16 // op(A') function: GB_tran__minv_int16_int16 // C type: int16_t // A type: int16_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_int16 ( int16_t *Cx, // Cx and Ax may be aliased int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
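// For reference, expanding GB_CAST_OP (p, p) by hand with the macros defined
// above gives one iteration of the apply loop of roughly this shape (a sketch
// of the macro expansion, not additional library code):
//
//     int16_t aij = Ax [p] ;                 // GB_GETA
//     int16_t z = (int16_t) aij ;            // GB_CASTING
//     Cx [p] = GB_IMINV_SIGNED (z, 16) ;     // GB_OP into GB_CX (p)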
ldriver_parallel.c
#include "stepper_parallel.h" #include "shallow2d.h" #ifdef _OPENMP #include <omp.h> #elif defined SYSTIME #include <sys/time.h> #endif #include <lua.h> #include <lauxlib.h> #include <lualib.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> //ldoc on /** * # Driver code * * The driver code is where we put together the time stepper and * the physics routines to actually solve the equations and make * pretty pictures of the solutions. * * ## Diagnostics * * The numerical method is supposed to preserve (up to rounding * errors) the total volume of water in the domain and the total * momentum. Ideally, we should also not see negative water heights, * since that will cause the system of equations to blow up. For * debugging convenience, we'll plan to periodically print diagnostic * information about these conserved quantities (and about the range * of water heights). */ void solution_check(central2d_t* sim) { int nx = sim->nx, ny = sim->ny; float* U = sim->U; float h_sum = 0, hu_sum = 0, hv_sum = 0; float hmin = U[central2d_offset(sim,0,0,0)]; float hmax = hmin; for (int j = 0; j < ny; ++j) { for (int i = 0; i < nx; ++i) { float h = U[central2d_offset(sim,0,i,j)]; h_sum += h; hu_sum += U[central2d_offset(sim,1,i,j)]; hv_sum += U[central2d_offset(sim,2,i,j)]; hmax = fmaxf(h, hmax); hmin = fminf(h, hmin); } // for (int i = 0; i < nx; ++i) { } // for (int j = 0; j < ny; ++j) { float cell_area = sim->dx * sim->dy; h_sum *= cell_area; hu_sum *= cell_area; hv_sum *= cell_area; printf("-\n Volume: %g\n Momentum: (%g, %g)\n Range: [%g, %g]\n", h_sum, hu_sum, hv_sum, hmin, hmax); assert(hmin > 0); } // void solution_check(central2d_t* sim) /** * ## I/O * * After finishing a run (or every several steps), we might want to * write out a data file for further processing by some other program * -- in this case, a Python visualizer. The visualizer takes the * number of pixels in x and y in the first two entries, then raw * single-precision raster pictures. */ FILE* viz_open(const char* fname, central2d_t* sim, int vskip) { FILE* fp = fopen(fname, "w"); if (fp) { float xy[2] = {sim->nx/vskip, sim->ny/vskip}; fwrite(xy, sizeof(float), 2, fp); } // if (fp) { return fp; } // FILE* viz_open(const char* fname, central2d_t* sim, int vskip) void viz_close(FILE* fp) { fclose(fp); } // void viz_close(FILE* fp) void viz_frame(FILE* fp, central2d_t* sim, int vskip) { if (!fp) { return; } // if (!fp) { for (int iy = 0; iy < sim->ny; iy += vskip) { for (int ix = 0; ix < sim->nx; ix += vskip) { fwrite(sim->U + central2d_offset(sim,0,ix,iy), sizeof(float), 1, fp); } // for (int ix = 0; ix < sim->nx; ix += vskip) { } // for (int iy = 0; iy < sim->ny; iy += vskip) { } // void viz_frame(FILE* fp, central2d_t* sim, int vskip) /** * ## Lua driver routines * * A better way to manage simulation parameters is by a scripting * language. Python is a popular choice, but I prefer Lua for many * things (not least because it is an easy build). It's also quite * cheap to call a Lua function for every point in a mesh * (less so for Python, though it probably won't make much difference). * * ### Lua callback functions * * We specify the initial conditions by providing the simulator * with a callback function to be called at each cell center. * The callback function is assumed to be the `init` field of * a table at index 1. 
*/ void lua_init_sim(lua_State* L, central2d_t* sim) { lua_getfield(L, 1, "init"); if (lua_type(L, -1) != LUA_TFUNCTION) { luaL_error(L, "Expected init to be a string"); } // if (lua_type(L, -1) != LUA_TFUNCTION) { int nx = sim->nx, ny = sim->ny, nfield = sim->nfield; float dx = sim->dx, dy = sim->dy; float* U = sim->U; for (int ix = 0; ix < nx; ++ix) { float x = (ix + 0.5) * dx; for (int iy = 0; iy < ny; ++iy) { float y = (iy + 0.5) * dy; lua_pushvalue(L, -1); lua_pushnumber(L, x); lua_pushnumber(L, y); lua_call(L, 2, nfield); for (int k = 0; k < nfield; ++k) { U[central2d_offset(sim,k,ix,iy)] = lua_tonumber(L, k-nfield); } // for (int k = 0; k < nfield; ++k) { lua_pop(L, nfield); } // for (int iy = 0; iy < ny; ++iy) { } // for (int ix = 0; ix < nx; ++ix) { lua_pop(L,1); } // void lua_init_sim(lua_State* L, central2d_t* sim) /** * ### Running the simulation * * The `run_sim` function looks a lot like the main routine of the * "ordinary" command line driver. We specify the initial conditions * by providing the simulator with a callback function to be called at * each cell center. Note that we have two different options for * timing the steps -- we can use the OpenMP timing routines * (preferable if OpenMP is available) or the POSIX `gettimeofday` * if the `SYSTIME` macro is defined. If there's no OpenMP and * `SYSTIME` is undefined, we fall back to just printing the number * of steps without timing information. */ int run_sim(lua_State* L) { // lua set up. int n = lua_gettop(L); if (n != 1 || !lua_istable(L, 1)) { luaL_error(L, "Argument must be a table"); } // if (n != 1 || !lua_istable(L, 1)) { lua_getfield(L, 1, "w"); lua_getfield(L, 1, "h"); lua_getfield(L, 1, "cfl"); lua_getfield(L, 1, "ftime"); lua_getfield(L, 1, "nx"); lua_getfield(L, 1, "ny"); lua_getfield(L, 1, "vskip"); lua_getfield(L, 1, "frames"); lua_getfield(L, 1, "out"); double grid_width = luaL_optnumber( L, 2, 2.0); double grid_height = luaL_optnumber( L, 3, grid_width); double cfl = luaL_optnumber( L, 4, 0.45); double ftime = luaL_optnumber( L, 5, 0.01); int nx = luaL_optinteger(L, 6, 200); int ny = luaL_optinteger(L, 7, nx); int vskip = luaL_optinteger(L, 8, 1); int frames = luaL_optinteger(L, 9, 50); const char* fname = luaL_optstring( L, 10, "sim.out"); lua_pop(L, 9); // initialize the global simulation object (which holds the entire grid) central2d_t* sim = central2d_init(grid_width, grid_height, nx, ny, 3, // nfield shallow2d_flux, shallow2d_speed, cfl); // more lua stuff. lua_init_sim(L,sim); printf("%g %g %d %d %g %d %g\n", grid_width, grid_height, nx, ny, cfl, frames, ftime); FILE* viz = viz_open(fname, sim, vskip); solution_check(sim); viz_frame(viz, sim, vskip); ////////////////////////////////////////////////////////////////////////////// // Begin Parallel region! // First, make sure that openMP is defined... if not, then abort! #ifndef _OPENMP printf("openMP not defined. Aborting\n"); abort(); #endif // First get the number of threads from the environment // If the number of threads is null, then set the number number of threads to 1 char* s = getenv("OMP_NUM_THREADS"); int num_threads = 0; if (s != NULL) { num_threads = atoi(s); } if (num_threads == 0) { num_threads = 1; } printf("Number of threads: %d\n", num_threads); const int n_rows = num_threads; const int n_cols = 1; double tcompute = 0; #pragma omp parallel num_threads(n_rows*n_cols) { // First, check that there are n_rows*n_cols threads if(omp_get_num_threads() != n_rows*n_cols) { printf("Couldn't create enough threads! 
Wanted %d but got %d\n aborting.\n", n_rows*n_cols, omp_get_num_threads());
            abort();
        } // if(omp_get_num_threads() != n_rows*n_cols) {

        /* In this approach, we partition the global grid into a bunch of sub
           grids. We split the rows of the global grid into n_rows rows and
           n_cols cols. We assign threads to pieces of the partition based on
           their thread number. Let's do that now.

           Pieces of the partition are identified by two indices, p_row and
           p_col. These specify the row and column (within the partition) of
           the piece. */
        int p_row = omp_get_thread_num() % n_rows;
        int p_col = ((int) omp_get_thread_num()) / ((int) n_rows);

        /* Now that each processor has its sub grid indices, we can determine
           which rows and columns (within the global grid) each processor will
           be responsible for. */
        const int ny_p = ny / n_rows;
        int ylow_local = p_row*ny_p;
        int yhigh_local;
        // The last row of the partition absorbs the ny % n_rows leftover rows.
        if(p_row == n_rows - 1) {
            yhigh_local = ny;
        } else {
            yhigh_local = ylow_local + ny_p;
        }

        const int nx_p = nx / n_cols;
        int xlow_local = p_col*nx_p;
        int xhigh_local;
        // Likewise, the last column absorbs the nx % n_cols leftover columns.
        if(p_col == n_cols - 1) {
            xhigh_local = nx;
        } else {
            xhigh_local = xlow_local + nx_p;
        }

        // Now set up the local simulation structure.
        const int nx_local = xhigh_local - xlow_local;
        const int ny_local = yhigh_local - ylow_local;
        central2d_t* sim_local = central2d_init(
            grid_width,  // This is wrong, but it's okay... see below.
            grid_height, // This is wrong, but it's okay... see below.
            nx_local,
            ny_local,
            3, // nfield
            shallow2d_flux,
            shallow2d_speed,
            cfl);

        /* Why do we pass the global grid_width and grid_height to the
           initializer for sim_local? Remember, sim_local only deals with a
           piece of the global grid, so its height and width will be smaller.
           If we look at the code for the initializer, we'll notice that
           grid_width and grid_height are only used to calculate dx and dy.
           Every cell in the global grid has the same size. Thus, dx and dy
           for each local grid should be equal to that of the global one.

           When we initialized the global sim variable, it calculated dx and
           dy. Thus, dx and dy are already known, we just need to get them to
           sim_local. The idea here is to just pass some junk values to
           central2d_init when initializing sim_local. This initializer will
           calculate incorrect values for dx and dy (for the local grid).
           After initialization is done, we will overwrite the faulty values
           of dx and dy using the ones in sim. */
        sim_local->dx = sim->dx;
        sim_local->dy = sim->dy;

        /* Now that sim_local has been initialized, we need to set up its U
           array. To do that, we need to copy the corresponding part of the
           global U array into the local U array. */
        float* U_local = sim_local->U;
        float* U = sim->U;

        for(int k = 0; k < 3; ++k) { // 3 = nfield
            for(int iy = 0; iy < ny_local; ++iy) {
                for(int ix = 0; ix < nx_local; ++ix) {
                    /* We need to copy a piece of the global U to the local U.
                       The first column of U_local corresponds to column
                       xlow_local in U; likewise, the first row of U_local
                       corresponds to row ylow_local in U. */
                    U_local[central2d_offset(sim_local, k, ix, iy)] =
                        U[central2d_offset(sim, k, xlow_local + ix, ylow_local + iy)];
                } // for(int ix = 0; ix < nx_local; ++ix) {
            } // for(int iy = 0; iy < ny_local; ++iy) {
        } // for(int k = 0; k < 3; ++k) {

        // wait for all threads to set up their local arrays.
        #pragma omp barrier

        /* Now, at long last, the local simulation structures are set up and
           ready to go! Let's cycle through the timesteps!
*/ for (int i = 0; i < frames; ++i) { double t0 = omp_get_wtime(); int nstep = central2d_run(sim_local, sim, xlow_local, ylow_local, ftime); double t1 = omp_get_wtime(); // There is a barrier in central2d_run so we only need to use one of the // t1 - t0 double elapsed = t1 - t0; #pragma omp sections { // One section to run out diagnostic on U. #pragma omp section { solution_check(sim); tcompute += elapsed; printf(" Time: %e (%e for %d steps)\n", elapsed, elapsed/nstep, nstep); } // #pragma omp section // One section to write a frame of U to memory. #pragma omp section { viz_frame(viz, sim, vskip); } // #pragma omp section } // #pragma omp single { } // for (int i = 0; i < frames; ++i) { // Free the local sim structure. central2d_free(sim_local); } // #pragma omp parallel num_threads(4) printf("Total compute time: %e\n", tcompute); viz_close(viz); central2d_free(sim); return 0; } // int run_sim(lua_State* L) /** * ### Main * * The main routine has the usage pattern * * lshallow tests.lua args * * where `tests.lua` has a call to the `simulate` function to run * the simulation. The arguments after the Lua file name are passed * into the Lua script via a global array called `args`. */ int main(int argc, char** argv) { if (argc < 2) { fprintf(stderr, "Usage: %s fname args\n", argv[0]); return -1; } // if (argc < 2) { lua_State* L = luaL_newstate(); luaL_openlibs(L); lua_register(L, "simulate", run_sim); lua_newtable(L); for (int i = 2; i < argc; ++i) { lua_pushstring(L, argv[i]); lua_rawseti(L, 1, i-1); } // for (int i = 2; i < argc; ++i) { lua_setglobal(L, "args"); if (luaL_dofile(L, argv[1])) { printf("%s\n", lua_tostring(L,-1)); } // if (luaL_dofile(L, argv[1])) { lua_close(L); return 0; } // int main(int argc, char** argv)
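/*
 * The row partition in run_sim gives every thread ny / n_rows rows, with the
 * last thread absorbing the ny % n_rows remainder. A remainder-spreading
 * variant (a sketch, not part of the original driver) keeps the per-thread
 * load difference to at most one row:
 */
static void row_range(int ny, int n_rows, int p_row, int* ylow, int* yhigh)
{
    int base = ny / n_rows;
    int rem  = ny % n_rows;
    // The first `rem` threads each take one extra row.
    *ylow  = p_row * base + (p_row < rem ? p_row : rem);
    *yhigh = *ylow + base + (p_row < rem ? 1 : 0);
}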
rmsdcalc.c
// Copyright 2011 Stanford University // // MSMBuilder is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // #include "Python.h" #include "numpy/arrayobject.h" #include <stdint.h> #include <stdio.h> #include "theobald_rmsd.h" #ifdef USE_OPENMP #include <omp.h> #endif #define CHECKARRAYTYPE(ary,name) if (PyArray_TYPE(ary) != NPY_FLOAT32) {\ PyErr_SetString(PyExc_ValueError,name" was not of type float32");\ return NULL;\ } #define CHECKARRAYCARRAY(ary,name) if ((PyArray_FLAGS(ary) & NPY_CARRAY) != NPY_CARRAY) {\ PyErr_SetString(PyExc_ValueError,name" was not a contiguous well-behaved array in C order");\ return NULL;\ } static PyObject *_getMultipleRMSDs_axis_major(PyObject *self, PyObject *args) { float *AData, *BData, *GAData, *distances, G_y; int nrealatoms=-1, npaddedatoms=-1, rowstride=-1, truestride=-1; npy_intp dim2[2], *arrayADims; PyArrayObject *ary_coorda, *ary_coordb, *ary_Ga, *ary_distances; if (!PyArg_ParseTuple(args, "iiiOOOf",&nrealatoms,&npaddedatoms,&rowstride, &ary_coorda, &ary_coordb, &ary_Ga, &G_y)) { return NULL; } // Get pointers to array data AData = (float*) PyArray_DATA(ary_coorda); BData = (float*) PyArray_DATA(ary_coordb); GAData = (float*) PyArray_DATA(ary_Ga); // TODO add sanity checking on Ga // TODO add sanity checking on structure dimensions A vs B arrayADims = PyArray_DIMS(ary_coorda); // Do some sanity checking on array dimensions // - make sure they are of float32 data type CHECKARRAYTYPE(ary_coorda,"Array A"); CHECKARRAYTYPE(ary_coordb,"Array B"); if (ary_coorda->nd != 3) { PyErr_SetString(PyExc_ValueError,"Array A did not have dimension 3"); return NULL; } if (ary_coordb->nd != 2) { PyErr_SetString(PyExc_ValueError,"Array B did not have dimension 2"); return NULL; } // make sure stride is 4 in last dimension (ie, is C-style and contiguous) CHECKARRAYCARRAY(ary_coorda,"Array A"); CHECKARRAYCARRAY(ary_coordb,"Array B"); // Create return array containing RMSDs dim2[0] = arrayADims[0]; dim2[1] = 1; ary_distances = (PyArrayObject*) PyArray_SimpleNew(1,dim2,NPY_FLOAT); distances = (float*) PyArray_DATA(ary_distances); truestride = npaddedatoms * 3; #ifdef USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < arrayADims[0]; i++) { float msd = msd_axis_major(nrealatoms, npaddedatoms, rowstride, (AData + i*truestride), BData, GAData[i], G_y); distances[i] = sqrtf(msd); } return PyArray_Return(ary_distances); } static PyObject *_getMultipleRMSDs_atom_major(PyObject *self, PyObject *args) { float *AData, *BData, *GAData, *distances, G_y; int nrealatoms=-1, npaddedatoms=-1; npy_intp dim2[2], *arrayADims; PyArrayObject *ary_coorda, *ary_coordb, *ary_Ga, *ary_distances; if (!PyArg_ParseTuple(args, "iiOOOf",&nrealatoms,&npaddedatoms, &ary_coorda, &ary_coordb, &ary_Ga, &G_y)) { return NULL; } // Get pointers to array data AData = (float*) PyArray_DATA(ary_coorda); BData = (float*) PyArray_DATA(ary_coordb); GAData = (float*) 
PyArray_DATA(ary_Ga); // TODO add sanity checking on Ga // TODO add sanity checking on structure dimensions A vs B arrayADims = PyArray_DIMS(ary_coorda); // Do some sanity checking on array dimensions // - make sure they are of float32 data type CHECKARRAYTYPE(ary_coorda,"Array A"); CHECKARRAYTYPE(ary_coordb,"Array B"); if (ary_coorda->nd != 3) { PyErr_SetString(PyExc_ValueError,"Array A did not have dimension 3"); return NULL; } if (ary_coordb->nd != 2) { PyErr_SetString(PyExc_ValueError,"Array B did not have dimension 2"); return NULL; } // make sure stride is 4 in last dimension (ie, is C-style and contiguous) CHECKARRAYCARRAY(ary_coorda,"Array A"); CHECKARRAYCARRAY(ary_coordb,"Array B"); // Create return array containing RMSDs dim2[0] = arrayADims[0]; dim2[1] = 1; ary_distances = (PyArrayObject*) PyArray_SimpleNew(1,dim2,NPY_FLOAT); distances = (float*) PyArray_DATA(ary_distances); #ifdef USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < arrayADims[0]; i++) { float msd = msd_atom_major(nrealatoms, npaddedatoms, (AData + i*npaddedatoms*3), BData, GAData[i], G_y); distances[i] = sqrtf(msd); } return PyArray_Return(ary_distances); } static PyMethodDef _rmsd_methods[] = { {"getMultipleRMSDs_axis_major", (PyCFunction)_getMultipleRMSDs_axis_major, METH_VARARGS, "Theobald RMSD calculation on axis-major centered structures."}, {"getMultipleRMSDs_atom_major", (PyCFunction)_getMultipleRMSDs_atom_major, METH_VARARGS, "Theobald RMSD calculation on atom-major centered structures."}, {NULL, NULL, 0, NULL} }; DL_EXPORT(void) initrmsdcalc(void) { Py_InitModule3("rmsdcalc", _rmsd_methods, "Core routines for IRMSD fast Theobald RMSD calculation."); import_array(); }
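// Layout note (a sketch of the assumed memory layouts, not library code): in
// both entry points, structure i starts at AData + i*npaddedatoms*3 floats
// (the axis-major path names this stride `truestride`). Axis-major is taken
// to mean all x coordinates, then all y, then all z within a structure, while
// atom-major interleaves (x, y, z) per atom; a toy accessor under those
// assumptions would be:
//
//     static inline const float* structure_ptr(const float* A, int i, int npaddedatoms)
//     {
//         return A + (size_t)i * npaddedatoms * 3;
//     }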
GB_unaryop__identity_fp64_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_fp64_int16 // op(A') function: GB_tran__identity_fp64_int16 // C type: double // A type: int16_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_fp64_int16 ( double *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_fp64_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
target_enter_data.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu

// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu

// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu

// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu

#include <stdio.h>

int main() {
  int i;

  // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]]
  fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i);

  // CHECK-NOT: Libomptarget
#pragma omp target enter data map(alloc: i)
#pragma omp target enter data map(present, alloc: i)
#pragma omp target exit data map(delete: i)

  // CHECK: i is present
  fprintf(stderr, "i is present\n");

  // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes)
  // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier).
  // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target enter data map(present, alloc: i)

  // CHECK-NOT: i is present
  fprintf(stderr, "i is present\n");

  return 0;
}
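// A companion sketch (not part of the test): the mapping that the 'present'
// modifier asserts can also be probed explicitly with the OpenMP 4.5 runtime
// call omp_target_is_present, which avoids the fatal error when offloading is
// mandatory:
//
//     #pragma omp target enter data map(alloc: i)
//     int mapped = omp_target_is_present(&i, omp_get_default_device()); // 1
//     #pragma omp target exit data map(delete: i)
//     mapped = omp_target_is_present(&i, omp_get_default_device());     // 0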
GB_unaryop__identity_uint32_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint32_int32 // op(A') function: GB_tran__identity_uint32_int32 // C type: uint32_t // A type: int32_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint32_int32 ( uint32_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint32_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
eval_template.c
// ${_warning_in_the_generated_file_not_to_edit}
//#include <math.h>
#include "power.c"
//#include <stdio.h>
%if SIZE_T == 'size_t':
#include <stdlib.h> /* size_t */
%endif
#include "newton_interval.h"

#define BREAKEVEN 100 // TODO: determine a typical value for this

// we only have integer exponents in pow, use specialized version:
#define pow(arg1, arg2) power(arg1, arg2)

%for token in tokens:
%for wy in range(1, max_wy+1):
%for i in range(max_deriv[wy]+1):
double ${token}_scalar_${wy}_${i}(
        const double x,
        const double * const restrict c,
        const ${SIZE_T} offset)
{
    % for cse_token, cse_def in eval_cse[token][wy][i]:
    const double ${cse_token} = ${cse_def};
    % endfor
    return ${eval_expr[token][wy][i][0]};
}

void ${token}_eval_${wy}_${i}(
        const ${SIZE_T} nx,
        const double * const restrict x,
        const double * const restrict c,
        const ${SIZE_T} nout,
        const double * const restrict xout,
        double * const restrict yout)
{
    // derivative = 0 evaluates function value, 1 evaluates first
    // derivative and so on..
    ${SIZE_T} xi = nx; // max: nx-1, nx considered "uninitialized"
    #pragma omp parallel for firstprivate(xi) schedule(static) if (nout > BREAKEVEN)
    for (${SIZE_T} oi=0; oi<nout; ++oi){
        // Set xi
        if (xi == nx){
            // xi == nx considered uninitialized!
            xi = get_interval(x, nx, xout[oi]);
            if (xi == -1)
                xi = 0;
        } else{
            xi = get_interval_from_guess(x, nx, xout[oi], xi);
            if (xi == -1)
                xi = 0;
        }
        // Calculate value of yout[oi] at xout[oi]
        // for shifted coefficients.
        yout[oi] = ${token}_scalar_${wy}_${i}(xout[oi]-x[xi], c, xi*${wy}*2);
    }
}
%endfor
%endfor
%endfor

#undef pow
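// newton_interval.h supplies get_interval / get_interval_from_guess. A
// plausible bisection-based get_interval (an assumption about its contract:
// return i with x[i] <= xval < x[i+1], or -1 when xval < x[0]) would be:
//
//     static int get_interval_sketch(const double * const x, size_t nx, double xval)
//     {
//         if (xval < x[0]) return -1;
//         size_t lo = 0, hi = nx - 1;
//         while (hi - lo > 1) {
//             size_t mid = lo + (hi - lo) / 2;
//             if (x[mid] <= xval) lo = mid;
//             else hi = mid;
//         }
//         return (int)lo;
//     }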
project2_Delaunoy_Crasset_EXPLICIT.c
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <mpi.h> #include <omp.h> #include "project2_Delaunoy_Crasset_EXPLICIT.h" #include "project2_Delaunoy_Crasset_IO.h" #define M_PI 3.14159265358979323846 /** * Compute the size of the arrays this process is responsible for * * Parameters: * rank: The rank of the calling process * nbproc: The number of processes * xSize: The discretization along the x axis * size_X: A pointer to an integer that will be set to the x size of eta and v * size_X_u: A pointer to an integer that will be set to the x size of u * size_X: A pointer to an integer that will be set to the x size of h * startval_X_h: A pointer to an integer that will be set to the starting value of h * endval_X_h: A pointer to an integer that will be set to the ending value of h */ void get_array_sizes(int rank, int nbproc, int xSize, int* size_X, int* size_X_u, int* size_X_h, int* startval_X_h, int* endval_X_h){ int mpi_xsize = xSize/nbproc; int startval_X, endval_X; int startval_X_u, endval_X_u; // When there is only one process if(nbproc == 1){ startval_X = 0; endval_X = xSize; *startval_X_h = 0; *endval_X_h = 2*xSize + 2; startval_X_u = 0; endval_X_u = xSize+1; } // When the process is the first else if(rank == 0){ startval_X = 0; endval_X = mpi_xsize; *startval_X_h = 0; *endval_X_h = 2*mpi_xsize + 2; startval_X_u = 0; endval_X_u = mpi_xsize; } // When the process lies in the middle of the matrix else if(rank == nbproc -1){ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize + 1; } // When the process is the last else{ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize; } // Add the remaining lines to first processes int remaining = xSize%nbproc; if(rank < remaining){ startval_X += rank; endval_X += rank + 1; startval_X_u += rank; endval_X_u += rank + 1; *startval_X_h += rank * 2; *endval_X_h += (rank + 1) * 2; } else{ *startval_X_h += remaining * 2; *endval_X_h += remaining * 2; } // Set variables *size_X = endval_X - startval_X + 1; *size_X_u = endval_X_u - startval_X_u + 1; *size_X_h = *endval_X_h - *startval_X_h + 1; } /** * Gather results from all process and save to disk * * Parameters: * eta: The eta array of the calling process * u: The u array of the calling process * v: The v array of the calling process * xSize: The discretization size along the x axis * ySize: The discretization size along the y axis * iteration: The iteration at which the save is performed * params: The structure holding the parameters of the run */ void gather_and_save(double** eta, double** u, double** v, int xSize, int ySize, unsigned int iteration, Parameters* params){ // Get process info int nbproc, myrank; MPI_Comm_size(MPI_COMM_WORLD, &nbproc); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); // Get the array sizes int size_X, size_X_u, size_X_h, startval_X_h, endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Get number of threads int openMP_nbthreads = atoi(getenv("OMP_NUM_THREADS")); double* etaTotal; double* uTotal; double* vTotal; // Get process result double* etaPartial = 
transformMatrixToArray(eta, size_X, ySize +1); double* uPartial = transformMatrixToArray(u, size_X_u, ySize +1); double* vPartial = transformMatrixToArray(v, size_X, ySize +2); if(nbproc != 1){ // Compute the receive counts and displacements vectors int tmp_size_X; int tmp_size_X_u; int tmp_size_X_h; int tmp_startval_X_h; int tmp_endval_X_h; int* recvcounts_eta = malloc(nbproc * sizeof(int)); int* recvcounts_u = malloc(nbproc * sizeof(int)); int* recvcounts_v = malloc(nbproc * sizeof(int)); int* disp_eta = malloc(nbproc * sizeof(int)); int* disp_u = malloc(nbproc * sizeof(int)); int* disp_v = malloc(nbproc * sizeof(int)); if(!recvcounts_eta || !recvcounts_u || !recvcounts_v || !disp_eta || !disp_u || !disp_v){ fprintf(stderr, "error malloc recvcounts\n"); MPI_Finalize(); exit(-1); } for(int i = 0; i < nbproc; i++){ get_array_sizes(i, nbproc, xSize, &tmp_size_X, &tmp_size_X_u, &tmp_size_X_h, &tmp_startval_X_h, &tmp_endval_X_h); recvcounts_eta[i] = tmp_size_X * (ySize + 1); recvcounts_u[i] = tmp_size_X_u * (ySize + 1); recvcounts_v[i] = tmp_size_X * (ySize + 2); if(i == 0){ disp_eta[0] = 0; disp_u[0] = 0; disp_v[0] = 0; } if (i < nbproc - 1){ disp_eta[i + 1] = disp_eta[i] + tmp_size_X * (ySize + 1); disp_u[i + 1] = disp_u[i] + tmp_size_X_u * (ySize + 1); disp_v[i + 1] = disp_v[i] + tmp_size_X * (ySize + 2); } } // Gather the results of every process etaTotal = malloc((xSize + 1) * (ySize + 1)* sizeof(double)); uTotal = malloc((xSize + 2) * (ySize + 1)* sizeof(double)); vTotal = malloc((xSize + 1) * (ySize + 2)* sizeof(double)); MPI_Gatherv(etaPartial, (size_X) * (ySize + 1) , MPI_DOUBLE, etaTotal, recvcounts_eta, disp_eta, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(uPartial, (size_X_u) * (ySize + 1) , MPI_DOUBLE, uTotal, recvcounts_u, disp_u, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(vPartial, (size_X) * (ySize + 2) , MPI_DOUBLE, vTotal, recvcounts_v, disp_v, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Free allocated memory free(etaPartial); free(uPartial); free(vPartial); free(recvcounts_eta); free(recvcounts_u); free(recvcounts_v); free(disp_eta); free(disp_u); free(disp_v); // Save results if(myrank == 0){ saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } } // In case there is only one process, save directly else{ etaTotal = transformMatrixToArray(eta, xSize + 1, ySize +1); uTotal = transformMatrixToArray(u, xSize + 2, ySize +1); vTotal= transformMatrixToArray(v, xSize + 1, ySize +2); saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } // Free allocated memory free(etaTotal); free(uTotal); free(vTotal); } /** * Solve the Navier-Stockes equations using explicit Euler method * * Parameters: * map: A structure containing the map infos * params: The parameters of the run * eta: A pointer to a matrix that will be set to the result of eta * u: A pointer to a matrix that will be set to the result of u * v: A pointer to a matrix that will be set to the result of v * * Returns: * An integer indicating whether the algorithm run with success or not */ int eulerExplicitMPI(Map* map, Parameters* params, double*** eta, double*** u, double*** v){ assert(map); assert(params); // Get process info int nbproc, myrank ; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Comm_size(MPI_COMM_WORLD, &nbproc); // Compute discretization size int xSize = (int)(map->a / params->deltaX); int ySize = (int)(map->b / params->deltaY); // Compute array sizes int size_X; int size_X_u; int size_X_h; int startval_X_h; int endval_X_h; 
get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Allocate memory // eta in {0, 1, ..., a/dx}X{0, 1, ..., b/dy} double** etaCurr = allocateDoubleMatrix(size_X, ySize + 1); if(!etaCurr){ return -1; } double** etaNext = allocateDoubleMatrix(size_X, ySize + 1); if(!etaNext){ freeDoubleMatrix(etaCurr, size_X); return -1; } // u in {-1/2, 1/2, ..., a/dx + 1/2}X{0, 1, ..., b/dy} double** uCurr = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uCurr){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); return -1; } double** uNext = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uNext){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); freeDoubleMatrix(uCurr, size_X_u); return -1; } // v in {0, 1, .., a/dx}X{-1/2, 1/2, ..., b/dy + 1/2} double** vCurr = allocateDoubleMatrix(size_X, ySize + 2); if(!vCurr){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); return -1; } double** vNext = allocateDoubleMatrix(size_X, ySize + 2); if(!vNext){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); return -1; } // h in {-1/2, 0, 1/2, ..., a/dx, a/dx + 1/2}X{-1/2, 0, 1/2, ..., b/dy, b/dy + 1/2} double** h = allocateDoubleMatrix(size_X_h, 2 * ySize + 3); if(!h){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); freeDoubleMatrix(vNext, size_X); return -1; } // Compute h from the provided map file for(int i = startval_X_h; i <= endval_X_h; i++){ for(int j = 0; j < 2 * ySize + 3; j++){ h[i-startval_X_h][j] = getGridValueAtDomainCoordinates(map, ((float)(i * xSize)/(xSize + 1)) * (params->deltaX / 2), ((float)(j * ySize)/(ySize + 1)) * (params->deltaY / 2)); } } // Initialize arrays #pragma omp parallel default(shared) { #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++){ etaCurr[i][j] = 0; } } #pragma omp for schedule(static) for(int i = 0; i < size_X_u; i++){ for(int j = 0; j < ySize; j++){ uCurr[i][j] = 0; } } #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++) vCurr[i][j] = 0; } } // Alocate arrays for receiving data from other process double* uReceived = malloc((ySize + 1) * sizeof(double)); double* etaReceived = malloc((ySize + 1) * sizeof(double)); // Starting time loop for(unsigned int t = 1; t <= params->TMax/params->deltaT; t++){ if(myrank == 0){ fprintf(stderr, "in loop t = %u\n", t); } // In a multiprocess environment, sending the leftmost column of u of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ if(myrank == nbproc-1){ MPI_Send(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, MPI_COMM_WORLD); //Tag 62 is for u }else if (myrank == 0){ MPI_Recv(uReceived, ySize + 1, MPI_DOUBLE, 1, 62, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, uReceived, ySize + 1, MPI_DOUBLE, myrank + 1, 62,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // Compute the next value of eta #pragma omp parallel default(shared) { // Process etaNext in one block if(myrank == nbproc-1 || nbproc == 1){ #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = 
                        etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX
                                         -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY)
                                        * params->deltaT + etaCurr[i][j];
                    }
                }
            }
            else{
                // Process the last column separately from the rest because we need to use
                // uReceived from the process with higher rank
                #pragma omp for schedule(static)
                for(int i = 0; i < size_X - 1; i++){
                    for(int j = 0; j < ySize + 1; j++){
                        etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX
                                         -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY)
                                        * params->deltaT + etaCurr[i][j];
                    }
                }

                #pragma omp for schedule(static)
                for(int j = 0; j < ySize + 1; j++){
                    etaNext[size_X-1][j] = (-(h[2*(size_X-1)+2][2*j+1] * uReceived[j] - h[2*(size_X-1)][2*j+1] * uCurr[size_X-1][j]) / params->deltaX
                                            -(h[2*(size_X-1)+1][2*j+2] * vCurr[size_X-1][j+1] - h[2*(size_X-1)+1][2*j] * vCurr[size_X-1][j]) / params->deltaY)
                                           * params->deltaT + etaCurr[size_X-1][j];
                }
            }
        }

        // In a multiprocess environment, send the rightmost column of eta of the domain controlled
        // by the current process to the process with the next rank
        if(nbproc != 1){
            if(myrank == 0){
                MPI_Send(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, 1, 42, MPI_COMM_WORLD); // Tag 42 is for eta
            }else if (myrank == nbproc - 1){
                MPI_Recv(etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }else{
                MPI_Sendrecv(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, myrank + 1, 42, etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }

        // uNext boundary conditions
        if(myrank == 0 || nbproc == 1){
            for(int i = 0; i < ySize + 1; i++){
                uNext[0][i] = 0;
            }
        }
        if(myrank == nbproc - 1 || nbproc == 1){
            for(int i = 0; i < ySize + 1; i++){
                uNext[size_X_u - 1][i] = 0;
            }
        }

        // Compute the next value of u
        #pragma omp parallel default(shared)
        {
            // Process uNext in one block
            if(nbproc == 1){
                #pragma omp for schedule(static)
                for(int i = 1; i < size_X_u-1; i++){
                    for(int j = 0; j < ySize + 1; j++){
                        uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX
                                       -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j];
                    }
                }
            }
            else if(myrank == 0){
                #pragma omp for schedule(static)
                for(int i = 1; i < size_X_u; i++){
                    for(int j = 0; j < ySize + 1; j++){
                        uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX
                                       -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j];
                    }
                }
            }
            else if(myrank == nbproc-1){
                // Process the first column separately from the rest because we need to use
                // etaReceived from the process with lower rank
                // The last process has a smaller size along the x axis
                #pragma omp for schedule(static)
                for(int j = 0; j < ySize + 1; j++){
                    uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX
                                   -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j];
                }

                #pragma omp for schedule(static)
                for(int i = 1; i < size_X_u-1; i++){
                    for(int j = 0; j < ySize + 1; j++){
                        uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX
                                       -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j];
                    }
                }
            }
            else{
                // Process the first column separately from the rest because we need to use
                // etaReceived from the process with lower rank
                #pragma omp for schedule(static)
                for(int j = 0; j < ySize + 1; j++){
                    uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX
                                   -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j];
                }

                #pragma omp for schedule(static)
                for(int i = 1; i < size_X_u; i++){
                    for(int j = 0; j < ySize + 1; j++){
                        uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX
                                       -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j];
                    }
                }
            }
        }

        // Boundary conditions for v
        for(int i = 0; i < size_X; i++)
            vNext[i][0] = 0;

        // Setting the excitation on the rightmost column of the whole domain space
        for(int i = 0; i < size_X; i++){
            if(params->s == 0) // Sinusoidal excitation
                vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT);
            else // Exponentially decaying excitation
                vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT) * exp(- t * params->deltaT / 500);
        }

        // Compute the next value of v
        #pragma omp parallel default(shared)
        {
            #pragma omp for schedule(static)
            for(int i = 0; i < size_X; i++){
                for(int j = 1; j < ySize + 1; j++){
                    vNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i][j-1]) / params->deltaY
                                   -params->gamma * vCurr[i][j]) * params->deltaT + vCurr[i][j];
                }
            }
        }

        // Process 0 gathers the sub-matrices of the processes and saves them to disk
        if(params->S != 0 && t % params->S == 0){
            gather_and_save(etaNext, uNext, vNext, xSize, ySize, t, params);
        }

        // Go to next step: swap the current and next buffers
        double** tmp;
        tmp = etaCurr;
        etaCurr = etaNext;
        etaNext = tmp;
        tmp = uCurr;
        uCurr = uNext;
        uNext = tmp;
        tmp = vCurr;
        vCurr = vNext;
        vNext = tmp;
    }

    // Return values
    *eta = etaCurr;
    *u = uCurr;
    *v = vCurr;

    freeDoubleMatrix(etaNext, size_X);
    freeDoubleMatrix(uNext, size_X_u);
    freeDoubleMatrix(vNext, size_X);
    freeDoubleMatrix(h, size_X_h);
    free(uReceived);
    free(etaReceived);

    return 0;
}
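/*
 * Illustrative sketch (not part of the original sources): gather_and_save
 * above builds per-rank receive counts and displacement vectors before
 * calling MPI_Gatherv. The standalone program below shows the same pattern
 * in isolation for a 1-D block decomposition; the near-even splitting rule
 * and all names here are assumptions for the demo, not the actual
 * get_array_sizes logic.
 *
 * Build/run with a typical MPI toolchain:
 *   mpicc gatherv_demo.c -o gatherv_demo && mpirun -n 4 ./gatherv_demo
 */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]){
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int N = 10;                          // global element count
    const int base = N / size, rem = N % size; // near-even block split
    const int mycount = base + (rank < rem ? 1 : 0);

    // Each rank fills its block with its own rank id
    double* local = malloc(mycount * sizeof(double));
    for(int i = 0; i < mycount; i++) local[i] = (double) rank;

    // MPI_Gatherv ignores recvcounts/displs on non-root ranks, so they only
    // need to be built on the root (the code above builds them everywhere)
    int* recvcounts = NULL;
    int* displs = NULL;
    double* global = NULL;
    if(rank == 0){
        recvcounts = malloc(size * sizeof(int));
        displs = malloc(size * sizeof(int));
        int offset = 0;
        for(int r = 0; r < size; r++){
            recvcounts[r] = base + (r < rem ? 1 : 0);
            displs[r] = offset;
            offset += recvcounts[r];
        }
        global = malloc(N * sizeof(double));
    }

    MPI_Gatherv(local, mycount, MPI_DOUBLE,
                global, recvcounts, displs, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if(rank == 0){
        for(int i = 0; i < N; i++) printf("%g ", global[i]);
        printf("\n");
        free(recvcounts); free(displs); free(global);
    }
    free(local);
    MPI_Finalize();
    return 0;
}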
GB_unaryop__identity_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint32_uint32 // op(A') function: GB_tran__identity_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint32_uint32 ( uint32_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
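/*
 * Illustrative sketch (not part of SuiteSparse:GraphBLAS): stripped of its
 * macro layer, the generated kernel above is an OpenMP-parallel elementwise
 * apply, Cx [p] = op (cast (Ax [p])); for identity/uint32 both the operator
 * and the cast are no-ops. The helper name below is hypothetical.
 *
 * Build: cc -fopenmp apply_demo.c -o apply_demo
 * (the pragma is ignored without OpenMP, so the demo still runs serially)
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void apply_identity_uint32
(
    uint32_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;         /* GB_GETA */
        uint32_t z = (uint32_t) aij ;   /* GB_CASTING (identity cast here) */
        Cx [p] = z ;                    /* GB_OP (identity operator) */
    }
}

int main (void)
{
    uint32_t A [5] = {1, 2, 3, 4, 5} ;
    uint32_t C [5] ;
    apply_identity_uint32 (C, A, 5, 2) ;
    for (int p = 0 ; p < 5 ; p++) printf ("%" PRIu32 " ", C [p]) ;
    printf ("\n") ;
    return (0) ;
}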
prop3DAcoTTIDenQ_DEO2_FDTD.h
#ifndef PROP3DACOTTIDENQ_DEO2_FDTD_H #define PROP3DACOTTIDENQ_DEO2_FDTD_H #include <omp.h> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <fftw3.h> #include <complex> #define MIN(x,y) ((x)<(y)?(x):(y)) class Prop3DAcoTTIDenQ_DEO2_FDTD { public: const bool _freeSurface; const long _nbx, _nby, _nbz, _nthread, _nx, _ny, _nz, _nsponge; const float _dx, _dy, _dz, _dt; const float _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz; const float _fDefault = 0.85f; float * __restrict__ _v = NULL; float * __restrict__ _eps = NULL; float * __restrict__ _eta = NULL; float * __restrict__ _b = NULL; float * __restrict__ _sinTheta = NULL; float * __restrict__ _cosTheta = NULL; float * __restrict__ _sinPhi = NULL; float * __restrict__ _cosPhi = NULL; float * __restrict__ _f = NULL; float * __restrict__ _dtOmegaInvQ = NULL; float * __restrict__ _pSpace = NULL; float * __restrict__ _mSpace = NULL; float * __restrict__ _tmpPg1a = NULL; float * __restrict__ _tmpPg2a = NULL; float * __restrict__ _tmpPg3a = NULL; float * __restrict__ _tmpMg1a = NULL; float * __restrict__ _tmpMg2a = NULL; float * __restrict__ _tmpMg3a = NULL; float * __restrict__ _tmpPg1b = NULL; float * __restrict__ _tmpPg2b = NULL; float * __restrict__ _tmpPg3b = NULL; float * __restrict__ _tmpMg1b = NULL; float * __restrict__ _tmpMg2b = NULL; float * __restrict__ _tmpMg3b = NULL; float * _pOld = NULL; float * _pCur = NULL; float * _mOld = NULL; float * _mCur = NULL; Prop3DAcoTTIDenQ_DEO2_FDTD( bool freeSurface, long nthread, long nx, long ny, long nz, long nsponge, float dx, float dy, float dz, float dt, const long nbx, const long nby, const long nbz) : _freeSurface(freeSurface), _nthread(nthread), _nx(nx), _ny(ny), _nz(nz), _nsponge(nsponge), _nbx(nbx), _nby(nby), _nbz(nbz), _dx(dx), _dy(dy), _dz(dz), _dt(dt), _c8_1(+1225.0 / 1024.0), _c8_2(-245.0 / 3072.0), _c8_3(+49.0 / 5120.0), _c8_4(-5.0 / 7168.0), _invDx(1.0 / _dx), _invDy(1.0 / _dy), _invDz(1.0 / _dz) { // Allocate arrays _v = new float[_nx * _ny * _nz]; _eps = new float[_nx * _ny * _nz]; _eta = new float[_nx * _ny * _nz]; _b = new float[_nx * _ny * _nz]; _sinTheta = new float[_nx * _ny * _nz]; _cosTheta = new float[_nx * _ny * _nz]; _sinPhi = new float[_nx * _ny * _nz]; _cosPhi = new float[_nx * _ny * _nz]; _f = new float[_nx * _ny * _nz]; _dtOmegaInvQ = new float[_nx * _ny * _nz]; _pSpace = new float[_nx * _ny * _nz]; _mSpace = new float[_nx * _ny * _nz]; _tmpPg1a = new float[_nx * _ny * _nz]; _tmpPg2a = new float[_nx * _ny * _nz]; _tmpPg3a = new float[_nx * _ny * _nz]; _tmpMg1a = new float[_nx * _ny * _nz]; _tmpMg2a = new float[_nx * _ny * _nz]; _tmpMg3a = new float[_nx * _ny * _nz]; _tmpPg1b = new float[_nx * _ny * _nz]; _tmpPg2b = new float[_nx * _ny * _nz]; _tmpPg3b = new float[_nx * _ny * _nz]; _tmpMg1b = new float[_nx * _ny * _nz]; _tmpMg2b = new float[_nx * _ny * _nz]; _tmpMg3b = new float[_nx * _ny * _nz]; _pOld = new float[_nx * _ny * _nz]; _pCur = new float[_nx * _ny * _nz]; _mOld = new float[_nx * _ny * _nz]; _mCur = new float[_nx * _ny * _nz]; numaFirstTouch(_nx, _ny, _nz, _nthread, _v, _eps, _eta, _b, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _f, _dtOmegaInvQ, _pSpace, _mSpace, _tmpPg1a, _tmpPg2a, _tmpPg3a, _tmpMg1a, _tmpMg2a, _tmpMg3a, _tmpPg1b, _tmpPg2b, _tmpPg3b, _tmpMg1b, _tmpMg2b, _tmpMg3b, _pOld, _pCur, _mOld, _mCur, _nbx, _nby, _nbz); } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void numaFirstTouch( const long nx, const long ny, const long nz, const long 
nthread, float * __restrict__ v, float * __restrict__ eps, float * __restrict__ eta, float * __restrict__ b, float * __restrict__ sinTheta, float * __restrict__ cosTheta, float * __restrict__ sinPhi, float * __restrict__ cosPhi, float * __restrict__ f, float * __restrict__ dtOmegaInvQ, float * __restrict__ pSpace, float * __restrict__ mSpace, float * __restrict__ tmpPg1a, float * __restrict__ tmpPg2a, float * __restrict__ tmpPg3a, float * __restrict__ tmpMg1a, float * __restrict__ tmpMg2a, float * __restrict__ tmpMg3a, float * __restrict__ tmpPg1b, float * __restrict__ tmpPg2b, float * __restrict__ tmpPg3b, float * __restrict__ tmpMg1b, float * __restrict__ tmpMg2b, float * __restrict__ tmpMg3b, float * __restrict__ pOld, float * __restrict__ pCur, float * __restrict__ mOld, float * __restrict__ mCur, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { for (long ky = by; ky < kymax; ky++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _ny * _nz + ky * _nz + kz; v[k] = 0; eps[k] = 0; eta[k] = 0; b[k] = 0; sinTheta[k] = 0; cosTheta[k] = 0; sinPhi[k] = 0; cosPhi[k] = 0; f[k] = 0; dtOmegaInvQ[k] = 0; pSpace[k] = 0; mSpace[k] = 0; tmpPg1a[k] = 0; tmpPg2a[k] = 0; tmpPg3a[k] = 0; tmpMg1a[k] = 0; tmpMg2a[k] = 0; tmpMg3a[k] = 0; tmpPg1b[k] = 0; tmpPg2b[k] = 0; tmpPg3b[k] = 0; tmpMg1b[k] = 0; tmpMg2b[k] = 0; tmpMg3b[k] = 0; pOld[k] = 0; pCur[k] = 0; mOld[k] = 0; mCur[k] = 0; } } } } } } // annulus for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { const long kindex1 = kx * ny * nz + ky * nz + k; const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); v[kindex1] = eps[kindex1] = eta[kindex1] = b[kindex1] = sinTheta[kindex1] = cosTheta[kindex1] = sinPhi[kindex1] = cosPhi[kindex1] = f[kindex1] = dtOmegaInvQ[kindex1] = pSpace[kindex1] = mSpace[kindex1] = tmpPg1a[kindex1] = tmpPg2a[kindex1] = tmpPg3a[kindex1] = tmpMg1a[kindex1] = tmpMg2a[kindex1] = tmpMg3a[kindex1] = tmpPg1b[kindex1] = tmpPg2b[kindex1] = tmpPg3b[kindex1] = tmpMg1b[kindex1] = tmpMg2b[kindex1] = tmpMg3b[kindex1] = pOld[kindex1] = pCur[kindex1] = mOld[kindex1] = mCur[kindex1] = 0; v[kindex2] = eps[kindex2] = eta[kindex2] = b[kindex2] = sinTheta[kindex2] = cosTheta[kindex2] = sinPhi[kindex2] = cosPhi[kindex2] = f[kindex2] = dtOmegaInvQ[kindex2] = pSpace[kindex2] = mSpace[kindex2] = tmpPg1a[kindex2] = tmpPg2a[kindex2] = tmpPg3a[kindex2] = tmpMg1a[kindex2] = tmpMg2a[kindex2] = tmpMg3a[kindex2] = tmpPg1b[kindex2] = tmpPg2b[kindex2] = tmpPg3b[kindex2] = tmpMg1b[kindex2] = tmpMg2b[kindex2] = tmpMg3b[kindex2] = pOld[kindex2] = pCur[kindex2] = mOld[kindex2] = mCur[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = kx * ny * nz + k * nz + kz; const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; v[kindex1] = eps[kindex1] = eta[kindex1] = b[kindex1] = sinTheta[kindex1] = cosTheta[kindex1] = sinPhi[kindex1] = 
cosPhi[kindex1] = f[kindex1] = dtOmegaInvQ[kindex1] = pSpace[kindex1] = mSpace[kindex1] = tmpPg1a[kindex1] = tmpPg2a[kindex1] = tmpPg3a[kindex1] = tmpMg1a[kindex1] = tmpMg2a[kindex1] = tmpMg3a[kindex1] = tmpPg1b[kindex1] = tmpPg2b[kindex1] = tmpPg3b[kindex1] = tmpMg1b[kindex1] = tmpMg2b[kindex1] = tmpMg3b[kindex1] = pOld[kindex1] = pCur[kindex1] = mOld[kindex1] = mCur[kindex1] = 0; v[kindex2] = eps[kindex2] = eta[kindex2] = b[kindex2] = sinTheta[kindex2] = cosTheta[kindex2] = sinPhi[kindex2] = cosPhi[kindex2] = f[kindex2] = dtOmegaInvQ[kindex2] = pSpace[kindex2] = mSpace[kindex2] = tmpPg1a[kindex2] = tmpPg2a[kindex2] = tmpPg3a[kindex2] = tmpMg1a[kindex2] = tmpMg2a[kindex2] = tmpMg3a[kindex2] = tmpPg1b[kindex2] = tmpPg2b[kindex2] = tmpPg3b[kindex2] = tmpMg1b[kindex2] = tmpMg2b[kindex2] = tmpMg3b[kindex2] = pOld[kindex2] = pCur[kindex2] = mOld[kindex2] = mCur[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = k * ny * nz + ky * nz + kz; const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; v[kindex1] = eps[kindex1] = eta[kindex1] = b[kindex1] = sinTheta[kindex1] = cosTheta[kindex1] = sinPhi[kindex1] = cosPhi[kindex1] = f[kindex1] = dtOmegaInvQ[kindex1] = pSpace[kindex1] = mSpace[kindex1] = tmpPg1a[kindex1] = tmpPg2a[kindex1] = tmpPg3a[kindex1] = tmpMg1a[kindex1] = tmpMg2a[kindex1] = tmpMg3a[kindex1] = tmpPg1b[kindex1] = tmpPg2b[kindex1] = tmpPg3b[kindex1] = tmpMg1b[kindex1] = tmpMg2b[kindex1] = tmpMg3b[kindex1] = pOld[kindex1] = pCur[kindex1] = mOld[kindex1] = mCur[kindex1] = 0; v[kindex2] = eps[kindex2] = eta[kindex2] = b[kindex2] = sinTheta[kindex2] = cosTheta[kindex2] = sinPhi[kindex2] = cosPhi[kindex2] = f[kindex2] = dtOmegaInvQ[kindex2] = pSpace[kindex2] = mSpace[kindex2] = tmpPg1a[kindex2] = tmpPg2a[kindex2] = tmpPg3a[kindex2] = tmpMg1a[kindex2] = tmpMg2a[kindex2] = tmpMg3a[kindex2] = tmpPg1b[kindex2] = tmpPg2b[kindex2] = tmpPg3b[kindex2] = tmpMg1b[kindex2] = tmpMg2b[kindex2] = tmpMg3b[kindex2] = pOld[kindex2] = pCur[kindex2] = mOld[kindex2] = mCur[kindex2] = 0; } } } } ~Prop3DAcoTTIDenQ_DEO2_FDTD() { if (_v != NULL) delete [] _v; if (_eps != NULL) delete [] _eps; if (_eta != NULL) delete [] _eta; if (_sinTheta != NULL) delete [] _sinTheta; if (_cosTheta != NULL) delete [] _cosTheta; if (_sinPhi != NULL) delete [] _sinPhi; if (_cosPhi != NULL) delete [] _cosPhi; if (_b != NULL) delete [] _b; if (_f != NULL) delete [] _f; if (_dtOmegaInvQ != NULL) delete [] _dtOmegaInvQ; if (_pSpace != NULL) delete [] _pSpace; if (_mSpace != NULL) delete [] _mSpace; if (_tmpPg1a != NULL) delete [] _tmpPg1a; if (_tmpPg2a != NULL) delete [] _tmpPg2a; if (_tmpPg3a != NULL) delete [] _tmpPg3a; if (_tmpMg1a != NULL) delete [] _tmpMg1a; if (_tmpMg2a != NULL) delete [] _tmpMg2a; if (_tmpMg3a != NULL) delete [] _tmpMg3a; if (_tmpPg1b != NULL) delete [] _tmpPg1b; if (_tmpPg2b != NULL) delete [] _tmpPg2b; if (_tmpPg3b != NULL) delete [] _tmpPg3b; if (_tmpMg1b != NULL) delete [] _tmpMg1b; if (_tmpMg2b != NULL) delete [] _tmpMg2b; if (_tmpMg3b != NULL) delete [] _tmpMg3b; if (_pOld != NULL) delete [] _pOld; if (_pCur != NULL) delete [] _pCur; if (_mOld != NULL) delete [] _mOld; if (_mCur != NULL) delete [] _mCur; } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif void info() { printf("\n"); printf("Prop3DAcoTTIDenQ_DEO2_FDTD\n"); printf(" nx,ny,nz; %5ld %5ld %5ld\n", _nx, _ny, _nz); printf(" nthread,nsponge,fs; %5ld 
%5ld %5d\n", _nthread, _nsponge, _freeSurface);
        printf(" X min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dx * (_nx - 1), _dx);
        printf(" Y min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dy * (_ny - 1), _dy);
        printf(" Z min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dz * (_nz - 1), _dz);
    }

    /**
     * Notes
     * - User must have called setupDtOmegaInvQ_2D to initialize the array _dtOmegaInvQ
     * - wavefield arrays are switched in this call
     *     pCur -> pOld
     *     pOld -> pCur
     *     mCur -> mOld
     *     mOld -> mCur
     *
     * 2018.07.26
     * - Ken's advice results in 6 derivatives per state variable instead of 11
     * - Refactoring from [T D- R-] [S R+ D+] to [T D- ] [R- S R+ D+]
     *     T  2nd order time update
     *     D+ forward staggered spatial derivative
     *     D- backward staggered spatial derivative
     *     S  material parameter sandwich terms
     *     R+ forward rotation
     *     R- backward rotation
     */
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
    inline void timeStep() {
        applyRotationSandwichRotation_TTI_FirstDerivatives3D_PlusHalf_TwoFields(
            _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
            _pCur, _mCur, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _eps, _eta, _f, _b,
            _tmpPg1a, _tmpPg2a, _tmpPg3a, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz);

        applyFirstDerivatives3D_MinusHalf_TimeUpdate_Nonlinear(
            _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _dt,
            _tmpPg1a, _tmpPg2a, _tmpPg3a, _tmpMg1a, _tmpMg2a, _tmpMg3a, _v, _b, _dtOmegaInvQ,
            _pCur, _mCur, _pSpace, _mSpace, _pOld, _mOld, _nbx, _nby, _nbz);

        // swap pointers
        float *pswap = _pOld;
        _pOld = _pCur;
        _pCur = pswap;
        float *mswap = _mOld;
        _mOld = _mCur;
        _mCur = mswap;
    }

    /**
     * Scale spatial derivatives by v^2/b to make them temporal derivatives
     */
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
    inline void scaleSpatialDerivatives() {
        #pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
        for (long bx = 0; bx < _nx; bx += _nbx) {
            for (long by = 0; by < _ny; by += _nby) {
                for (long bz = 0; bz < _nz; bz += _nbz) {
                    const long kxmax = MIN(bx + _nbx, _nx);
                    const long kymax = MIN(by + _nby, _ny);
                    const long kzmax = MIN(bz + _nbz, _nz);

                    for (long kx = bx; kx < kxmax; kx++) {
                        for (long ky = by; ky < kymax; ky++) {
                            #pragma omp simd
                            for (long kz = bz; kz < kzmax; kz++) {
                                const long k = kx * _ny * _nz + ky * _nz + kz;
                                const float v2OverB = _v[k] * _v[k] / _b[k];
                                _pSpace[k] *= v2OverB;
                                _mSpace[k] *= v2OverB;
                            }
                        }
                    }
                }
            }
        }
    }

    /**
     * Add the Born source at the current time
     *
     * User must have:
     * - called the nonlinear forward
     * - saved 2nd time derivative of pressure at corresponding time index in array dp2
     * - Born source term will be injected into the _pCur array
     */
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
    inline void forwardBornInjection_V(float *dVel, float *wavefieldDP, float *wavefieldDM) {
        #pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
        for (long bx = 0; bx < _nx; bx += _nbx) {
            for (long by = 0; by < _ny; by += _nby) {
                for (long bz = 0; bz < _nz; bz += _nbz) {
                    const long kxmax = MIN(bx + _nbx, _nx);
                    const long kymax = MIN(by + _nby, _ny);
                    const long kzmax = MIN(bz + _nbz, _nz);

                    for (long kx = bx; kx < kxmax; kx++) {
                        for (long ky = by; ky < kymax; ky++) {
                            #pragma omp simd
                            for (long kz = bz; kz < kzmax; kz++) {
                                const long k = kx * _ny * _nz + ky * _nz + kz;
                                const float V = _v[k];
                                const float B = _b[k];
                                const float dV = dVel[k];
                                //
V^2/b factor to "clear" the b/V^2 factor on L_tP and L_tM // _dt^2 factor is from the finite difference approximation // 2B_dV/V^3 factor is from the linearization const float factor = 2 * _dt * _dt * dV / V; _pCur[k] += factor * wavefieldDP[k]; _mCur[k] += factor * wavefieldDM[k]; } } } } } } } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void forwardBornInjection_VEA(float *dVel, float *dEps, float *dEta, float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) { // Right side spatial derivatives for the Born source applyFirstDerivatives3D_TTI_PlusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, wavefieldP, wavefieldP, wavefieldP, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1a, _tmpPg2a, _tmpPg3a, _nbx, _nby, _nbz); applyFirstDerivatives3D_TTI_PlusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, wavefieldM, wavefieldM, wavefieldM, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz); // Sandwich terms for the Born source // note flipped sign for Z derivative term between P and M #pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long by = 0; by < _ny; by += _nby) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kymax = MIN(by + _nby, _ny); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { for (long ky = by; ky < kymax; ky++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _ny * _nz + ky * _nz + kz; const float V = _v[k]; const float E = _eps[k]; const float A = _eta[k]; const float B = _b[k]; const float F = _f[k]; const float dV = dVel[k]; const float dE = dEps[k]; const float dA = dEta[k]; _tmpPg1b[k] = (+2 * B * dE) *_tmpPg1a[k]; _tmpPg2b[k] = (+2 * B * dE) *_tmpPg2a[k]; _tmpPg3b[k] = (-2 * B * F * A * dA) *_tmpPg3a[k] + (dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMg3a[k]; _tmpMg1b[k] = 0; _tmpMg2b[k] = 0; _tmpMg3b[k] = (+2 * B * F * A * dA) *_tmpMg3a[k] + (dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPg3a[k]; } } } } } } // Left side spatial derivatives for the Born source applyFirstDerivatives3D_TTI_MinusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _tmpPg1b, _tmpPg2b, _tmpPg3b, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1a, _tmpPg2a, _tmpPg3a, _nbx, _nby, _nbz); applyFirstDerivatives3D_TTI_MinusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _tmpMg1b, _tmpMg2b, _tmpMg3b, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz); // add the born source at the current time #pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long by = 0; by < _ny; by += _nby) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kymax = MIN(by + _nby, _ny); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { for (long ky = by; ky < kymax; ky++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _ny * _nz + ky * _nz + kz; const float V = _v[k]; const float B = _b[k]; const float dV = dVel[k]; const float dt2v2OverB = _dt * _dt * V * V / B; const float factor = 2 * B * dV / (V * V * V); _pCur[k] += 
dt2v2OverB * (factor * wavefieldDP[k] + _tmpPg1a[k] + _tmpPg2a[k] + _tmpPg3a[k]);
                                _mCur[k] += dt2v2OverB * (factor * wavefieldDM[k] + _tmpMg1a[k] + _tmpMg2a[k] + _tmpMg3a[k]);
                            }
                        }
                    }
                }
            }
        }
    }

    /**
     * Accumulate the Born image term at the current time
     *
     * User must have:
     * - called the nonlinear forward
     * - saved 2nd time derivative of pressure at corresponding time index in array dp2
     * - Born image term will be accumulated in the _dm array
     */
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
    inline void adjointBornAccumulation_V(float *dVel, float *wavefieldDP, float *wavefieldDM) {
        #pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
        for (long bx = 0; bx < _nx; bx += _nbx) {
            for (long by = 0; by < _ny; by += _nby) {
                for (long bz = 0; bz < _nz; bz += _nbz) {
                    const long kxmax = MIN(bx + _nbx, _nx);
                    const long kymax = MIN(by + _nby, _ny);
                    const long kzmax = MIN(bz + _nbz, _nz);

                    for (long kx = bx; kx < kxmax; kx++) {
                        for (long ky = by; ky < kymax; ky++) {
                            #pragma omp simd
                            for (long kz = bz; kz < kzmax; kz++) {
                                const long k = kx * _ny * _nz + ky * _nz + kz;
                                const float V = _v[k];
                                const float B = _b[k];
                                const float factor = 2 * B / (V * V * V);

                                dVel[k] += factor * (wavefieldDP[k] * _pOld[k] + wavefieldDM[k] * _mOld[k]);
                            }
                        }
                    }
                }
            }
        }
    }

    /**
     * Apply Kz wavenumber filter for up/down wavefield separation
     * Faqi, 2011, Geophysics https://library.seg.org/doi/full/10.1190/1.3533914
     *
     * We handle the FWI and RTM imaging conditions with a condition inside the OMP loop
     *
     * Example Kz filtering with 8 samples
     *   frequency | +0 | +1 | +2 | +3 |  N | -3 | -2 | -1 |
     *   original  |  0 |  1 |  2 |  3 |  4 |  5 |  6 |  7 |
     *   upgoing   |  0 |  X |  X |  X |  4 |  5 |  6 |  7 |
     *   dngoing   |  0 |  1 |  2 |  3 |  4 |  X |  X |  X |
     */
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
    inline void adjointBornAccumulation_wavefieldsep_V(float *dVel, float *wavefieldDP, float *wavefieldDM, const long isFWI) {
        const long nfft = 2 * _nz;
        const float scale = 1.0f / (float)(nfft);

        // FWI: adj wavefield is dngoing
        // RTM: adj wavefield is upgoing
        const long kfft_adj = (isFWI) ?
0 : nfft / 2; std::complex<float> * __restrict__ tmp = new std::complex<float>[nfft]; fftwf_plan planForward = fftwf_plan_dft_1d(nfft, reinterpret_cast<fftwf_complex*>(tmp), reinterpret_cast<fftwf_complex*>(tmp), +1, FFTW_ESTIMATE); fftwf_plan planInverse = fftwf_plan_dft_1d(nfft, reinterpret_cast<fftwf_complex*>(tmp), reinterpret_cast<fftwf_complex*>(tmp), -1, FFTW_ESTIMATE); delete [] tmp; #pragma omp parallel num_threads(_nthread) { std::complex<float> * __restrict__ tmp_nlf_p = new std::complex<float>[nfft]; std::complex<float> * __restrict__ tmp_adj_p = new std::complex<float>[nfft]; std::complex<float> * __restrict__ tmp_nlf_m = new std::complex<float>[nfft]; std::complex<float> * __restrict__ tmp_adj_m = new std::complex<float>[nfft]; #pragma omp for collapse(2) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long by = 0; by < _ny; by += _nby) { const long kxmax = MIN(bx + _nbx, _nx); const long kymax = MIN(by + _nby, _ny); for (long kx = bx; kx < kxmax; kx++) { for (long ky = by; ky < kymax; ky++) { #pragma omp simd for (long kfft = 0; kfft < nfft; kfft++) { tmp_nlf_p[kfft] = 0; tmp_adj_p[kfft] = 0; tmp_nlf_m[kfft] = 0; tmp_adj_m[kfft] = 0; } #pragma omp simd for (long kz = 0; kz < _nz; kz++) { const long k = kx * _ny * _nz + ky * _nz + kz; tmp_nlf_p[kz] = scale * wavefieldDP[k]; tmp_adj_p[kz] = scale * _pOld[k]; tmp_nlf_m[kz] = scale * wavefieldDM[k]; tmp_adj_m[kz] = scale * _mOld[k]; } fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_nlf_p), reinterpret_cast<fftwf_complex*>(tmp_nlf_p)); fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_adj_p), reinterpret_cast<fftwf_complex*>(tmp_adj_p)); fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_nlf_m), reinterpret_cast<fftwf_complex*>(tmp_nlf_m)); fftwf_execute_dft(planForward, reinterpret_cast<fftwf_complex*>(tmp_adj_m), reinterpret_cast<fftwf_complex*>(tmp_adj_m)); // upgoing: zero the positive frequencies, excluding Nyquist // dngoing: zero the negative frequencies, excluding Nyquist #pragma omp simd for (long k = 1; k < nfft / 2; k++) { tmp_nlf_p[nfft / 2 + k] = 0; tmp_adj_p[kfft_adj + k] = 0; tmp_nlf_m[nfft / 2 + k] = 0; tmp_adj_m[kfft_adj + k] = 0; } fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_nlf_p), reinterpret_cast<fftwf_complex*>(tmp_nlf_p)); fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_adj_p), reinterpret_cast<fftwf_complex*>(tmp_adj_p)); fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_nlf_m), reinterpret_cast<fftwf_complex*>(tmp_nlf_m)); fftwf_execute_dft(planInverse, reinterpret_cast<fftwf_complex*>(tmp_adj_m), reinterpret_cast<fftwf_complex*>(tmp_adj_m)); // Faqi eq 10 // Applied to FWI: [Sup * Rdn] // Applied to RTM: [Sup * Rup] #pragma omp simd for (long kz = 0; kz < _nz; kz++) { const long k = kx * _ny * _nz + ky * _nz + kz; const float V = _v[k]; const float B = _b[k]; const float factor = 2 * B / (V * V * V); dVel[k] += factor * (real(tmp_nlf_p[kz] * tmp_adj_p[kz]) + real(tmp_nlf_m[kz] * tmp_adj_m[kz])); } } // end loop over ky } // end loop over kx } // end loop over by } // end loop over bx delete [] tmp_nlf_p; delete [] tmp_adj_p; delete [] tmp_nlf_m; delete [] tmp_adj_m; } // end parallel region fftwf_destroy_plan(planForward); fftwf_destroy_plan(planInverse); } #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void adjointBornAccumulation_VEA(float *dVel, float *dEps, float *dEta, float *wavefieldP, float 
*wavefieldM, float *wavefieldDP, float *wavefieldDM) { // Right side spatial derivatives for the adjoint accumulation applyFirstDerivatives3D_TTI_PlusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, wavefieldP, wavefieldP, wavefieldP, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1a, _tmpPg2a, _tmpPg3a, _nbx, _nby, _nbz); applyFirstDerivatives3D_TTI_PlusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, wavefieldM, wavefieldM, wavefieldM, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz); applyFirstDerivatives3D_TTI_PlusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _pOld, _pOld, _pOld, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1b, _tmpPg2b, _tmpPg3b, _nbx, _nby, _nbz); applyFirstDerivatives3D_TTI_PlusHalf( _freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _mOld, _mOld, _mOld, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1b, _tmpMg2b, _tmpMg3b, _nbx, _nby, _nbz); // Sandwich terms for the adjoint accumulation #pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static) for (long bx = 0; bx < _nx; bx += _nbx) { for (long by = 0; by < _ny; by += _nby) { for (long bz = 0; bz < _nz; bz += _nbz) { const long kxmax = MIN(bx + _nbx, _nx); const long kymax = MIN(by + _nby, _ny); const long kzmax = MIN(bz + _nbz, _nz); for (long kx = bx; kx < kxmax; kx++) { for (long ky = by; ky < kymax; ky++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kx * _ny * _nz + ky * _nz + kz; const float V = _v[k]; const float E = _eps[k]; const float A = _eta[k]; const float B = _b[k]; const float F = _f[k]; const float factor = 2 * B / (V * V * V); dVel[k] += factor * (wavefieldDP[k] * _pOld[k] + wavefieldDM[k] * _mOld[k]); dEps[k] += (-2 * B * _tmpPg1a[k] * _tmpPg1b[k] -2 * B * _tmpPg2a[k] * _tmpPg2b[k]); const float partP = 2 * B * F * A * _tmpPg3a[k] - (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMg3a[k]; const float partM = 2 * B * F * A * _tmpMg3a[k] + (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPg3a[k]; dEta[k] += (partP * _tmpPg3b[k] - partM * _tmpMg3b[k]); } } } } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline static void applyRotationSandwichRotation_TTI_FirstDerivatives3D_PlusHalf_TwoFields( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, Type * __restrict__ inP, Type * __restrict__ inM, float * __restrict__ sinTheta, float * __restrict__ cosTheta, float * __restrict__ sinPhi, float * __restrict__ cosPhi, Type * __restrict__ fieldEps, Type * __restrict__ fieldEta, Type * __restrict__ fieldVsVp, Type * __restrict__ fieldBuoy, Type * __restrict__ outPx, Type * __restrict__ outPy, Type * __restrict__ outPz, Type * __restrict__ outMx, Type * __restrict__ outMy, Type * __restrict__ outMz, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for 
(long ky = 0; ky < ny; ky++) { const long kindex1 = kx * ny * nz + ky * nz + k; const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); outPx[kindex1] = outPx[kindex2] = 0; outPy[kindex1] = outPy[kindex2] = 0; outPz[kindex1] = outPz[kindex2] = 0; outMx[kindex1] = outMx[kindex2] = 0; outMy[kindex1] = outMy[kindex2] = 0; outMz[kindex1] = outMz[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = kx * ny * nz + k * nz + kz; const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; outPx[kindex1] = outPx[kindex2] = 0; outPy[kindex1] = outPy[kindex2] = 0; outPz[kindex1] = outPz[kindex2] = 0; outMx[kindex1] = outMx[kindex2] = 0; outMy[kindex1] = outMy[kindex2] = 0; outMz[kindex1] = outMz[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = k * ny * nz + ky * nz + kz; long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; outPx[kindex1] = outPx[kindex2] = 0; outPy[kindex1] = outPy[kindex2] = 0; outPz[kindex1] = outPz[kindex2] = 0; outMx[kindex1] = outMx[kindex2] = 0; outMy[kindex1] = outMy[kindex2] = 0; outMz[kindex1] = outMz[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const Type stencilPDx = c8_1 * (- inP[(kx+0) * nynz + kynz_kz] + inP[(kx+1) * nynz + kynz_kz]) + c8_2 * (- inP[(kx-1) * nynz + kynz_kz] + inP[(kx+2) * nynz + kynz_kz]) + c8_3 * (- inP[(kx-2) * nynz + kynz_kz] + inP[(kx+3) * nynz + kynz_kz]) + c8_4 * (- inP[(kx-3) * nynz + kynz_kz] + inP[(kx+4) * nynz + kynz_kz]); const Type stencilPDy = c8_1 * (- inP[kxnynz + (ky+0) * nz + kz] + inP[kxnynz + (ky+1) * nz + kz]) + c8_2 * (- inP[kxnynz + (ky-1) * nz + kz] + inP[kxnynz + (ky+2) * nz + kz]) + c8_3 * (- inP[kxnynz + (ky-2) * nz + kz] + inP[kxnynz + (ky+3) * nz + kz]) + c8_4 * (- inP[kxnynz + (ky-3) * nz + kz] + inP[kxnynz + (ky+4) * nz + kz]); const Type stencilPDz = c8_1 * (- inP[kxnynz_kynz + (kz+0)] + inP[kxnynz_kynz + (kz+1)]) + c8_2 * (- inP[kxnynz_kynz + (kz-1)] + inP[kxnynz_kynz + (kz+2)]) + c8_3 * (- inP[kxnynz_kynz + (kz-2)] + inP[kxnynz_kynz + (kz+3)]) + c8_4 * (- inP[kxnynz_kynz + (kz-3)] + inP[kxnynz_kynz + (kz+4)]); const Type stencilMDx = c8_1 * (- inM[(kx+0) * nynz + kynz_kz] + inM[(kx+1) * nynz + kynz_kz]) + c8_2 * (- inM[(kx-1) * nynz + kynz_kz] + inM[(kx+2) * nynz + kynz_kz]) + c8_3 * (- inM[(kx-2) * nynz + kynz_kz] + inM[(kx+3) * nynz + kynz_kz]) + c8_4 * (- inM[(kx-3) * nynz + kynz_kz] + inM[(kx+4) * nynz + kynz_kz]); const Type stencilMDy = c8_1 * (- inM[kxnynz + (ky+0) * nz + kz] + inM[kxnynz + (ky+1) * nz + kz]) + c8_2 * (- inM[kxnynz + (ky-1) * nz + kz] + inM[kxnynz + (ky+2) * nz + kz]) + c8_3 * (- inM[kxnynz + (ky-2) * nz + kz] + inM[kxnynz + (ky+3) * nz + kz]) + c8_4 * (- inM[kxnynz + (ky-3) * nz + kz] + inM[kxnynz + (ky+4) * nz + kz]); const Type stencilMDz = c8_1 * (- 
inM[kxnynz_kynz + (kz+0)] + inM[kxnynz_kynz + (kz+1)]) + c8_2 * (- inM[kxnynz_kynz + (kz-1)] + inM[kxnynz_kynz + (kz+2)]) + c8_3 * (- inM[kxnynz_kynz + (kz-2)] + inM[kxnynz_kynz + (kz+3)]) + c8_4 * (- inM[kxnynz_kynz + (kz-3)] + inM[kxnynz_kynz + (kz+4)]); const Type dpdx = invDx * stencilPDx; const Type dpdy = invDy * stencilPDy; const Type dpdz = invDz * stencilPDz; const Type dmdx = invDx * stencilMDx; const Type dmdy = invDy * stencilMDy; const Type dmdz = invDz * stencilMDz; const long k = kx * ny * nz + ky * nz + kz; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; const float sinThetaSinPhi = sinTheta[k] * sinPhi[k]; const Type fieldEta2 = fieldEta[k] * fieldEta[k]; const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k]; const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz; const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz; const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2); const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M; const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M; const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]); const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]); outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP; outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP; outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP; outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM; outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM; outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative // X and Y derivatives are identically zero { const Type stencilPDz0 = c8_1 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 1]) + c8_2 * (+ inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 2]) + c8_3 * (+ inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 3]) + c8_4 * (+ inP[kxnynz_kynz + 3] + inP[kxnynz_kynz + 4]); const Type stencilMDz0 = c8_1 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 1]) + c8_2 * (+ inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 2]) + c8_3 * (+ inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 3]) + c8_4 * (+ inM[kxnynz_kynz + 3] + inM[kxnynz_kynz + 4]); const Type dpdx = 0; const Type dpdy = 0; const Type dpdz = invDz * stencilPDz0; const Type dmdx = 0; const Type dmdy = 0; const Type dmdz = invDz * stencilMDz0; const long k = kx * ny * nz + ky * nz + 0; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; const float sinThetaSinPhi = sinTheta[k] * sinPhi[k]; const Type fieldEta2 = fieldEta[k] * fieldEta[k]; const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k]; const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz; const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz; const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2); const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M; const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M; const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]); const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]); outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP; outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP; outPz[k] = tmpE * dpdz + 
cosTheta[k] * tmpP; outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM; outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM; outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM; } // kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cells below for X/Y derivative { const Type stencilPDx1 = c8_1 * (- inP[(kx+0) * nynz + kynz + 1] + inP[(kx+1) * nynz + kynz + 1]) + c8_2 * (- inP[(kx-1) * nynz + kynz + 1] + inP[(kx+2) * nynz + kynz + 1]) + c8_3 * (- inP[(kx-2) * nynz + kynz + 1] + inP[(kx+3) * nynz + kynz + 1]) + c8_4 * (- inP[(kx-3) * nynz + kynz + 1] + inP[(kx+4) * nynz + kynz + 1]); const Type stencilPDy1 = c8_1 * (- inP[kxnynz + (ky+0) * nz + 1] + inP[kxnynz + (ky+1) * nz + 1]) + c8_2 * (- inP[kxnynz + (ky-1) * nz + 1] + inP[kxnynz + (ky+2) * nz + 1]) + c8_3 * (- inP[kxnynz + (ky-2) * nz + 1] + inP[kxnynz + (ky+3) * nz + 1]) + c8_4 * (- inP[kxnynz + (ky-3) * nz + 1] + inP[kxnynz + (ky+4) * nz + 1]); const Type stencilPDz1 = c8_1 * (- inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 2]) + c8_2 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 3]) + c8_3 * (+ inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 4]) + c8_4 * (+ inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 5]); const Type stencilMDx1 = c8_1 * (- inM[(kx+0) * nynz + kynz + 1] + inM[(kx+1) * nynz + kynz + 1]) + c8_2 * (- inM[(kx-1) * nynz + kynz + 1] + inM[(kx+2) * nynz + kynz + 1]) + c8_3 * (- inM[(kx-2) * nynz + kynz + 1] + inM[(kx+3) * nynz + kynz + 1]) + c8_4 * (- inM[(kx-3) * nynz + kynz + 1] + inM[(kx+4) * nynz + kynz + 1]); const Type stencilMDy1 = c8_1 * (- inM[kxnynz + (ky+0) * nz + 1] + inM[kxnynz + (ky+1) * nz + 1]) + c8_2 * (- inM[kxnynz + (ky-1) * nz + 1] + inM[kxnynz + (ky+2) * nz + 1]) + c8_3 * (- inM[kxnynz + (ky-2) * nz + 1] + inM[kxnynz + (ky+3) * nz + 1]) + c8_4 * (- inM[kxnynz + (ky-3) * nz + 1] + inM[kxnynz + (ky+4) * nz + 1]); const Type stencilMDz1 = c8_1 * (- inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 2]) + c8_2 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 3]) + c8_3 * (+ inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 4]) + c8_4 * (+ inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 5]); const Type dpdx = invDx * stencilPDx1; const Type dpdy = invDy * stencilPDy1; const Type dpdz = invDz * stencilPDz1; const Type dmdx = invDx * stencilMDx1; const Type dmdy = invDy * stencilMDy1; const Type dmdz = invDz * stencilMDz1; const long k = kx * ny * nz + ky * nz + 1; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; const float sinThetaSinPhi = sinTheta[k] * sinPhi[k]; const Type fieldEta2 = fieldEta[k] * fieldEta[k]; const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k]; const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz; const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz; const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2); const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M; const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M; const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]); const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]); outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP; outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP; outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP; outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM; outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM; outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM; } // kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative { const Type stencilPDx2 = c8_1 * (- inP[(kx+0) * nynz + kynz + 2] + inP[(kx+1) * nynz + kynz + 2]) + c8_2 * 
(- inP[(kx-1) * nynz + kynz + 2] + inP[(kx+2) * nynz + kynz + 2]) + c8_3 * (- inP[(kx-2) * nynz + kynz + 2] + inP[(kx+3) * nynz + kynz + 2]) + c8_4 * (- inP[(kx-3) * nynz + kynz + 2] + inP[(kx+4) * nynz + kynz + 2]); const Type stencilPDy2 = c8_1 * (- inP[kxnynz + (ky+0) * nz + 2] + inP[kxnynz + (ky+1) * nz + 2]) + c8_2 * (- inP[kxnynz + (ky-1) * nz + 2] + inP[kxnynz + (ky+2) * nz + 2]) + c8_3 * (- inP[kxnynz + (ky-2) * nz + 2] + inP[kxnynz + (ky+3) * nz + 2]) + c8_4 * (- inP[kxnynz + (ky-3) * nz + 2] + inP[kxnynz + (ky+4) * nz + 2]); const Type stencilPDz2 = c8_1 * (- inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 3]) + c8_2 * (- inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 4]) + c8_3 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 5]) + c8_4 * (+ inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 6]); const Type stencilMDx2 = c8_1 * (- inM[(kx+0) * nynz + kynz + 2] + inM[(kx+1) * nynz + kynz + 2]) + c8_2 * (- inM[(kx-1) * nynz + kynz + 2] + inM[(kx+2) * nynz + kynz + 2]) + c8_3 * (- inM[(kx-2) * nynz + kynz + 2] + inM[(kx+3) * nynz + kynz + 2]) + c8_4 * (- inM[(kx-3) * nynz + kynz + 2] + inM[(kx+4) * nynz + kynz + 2]); const Type stencilMDy2 = c8_1 * (- inM[kxnynz + (ky+0) * nz + 2] + inM[kxnynz + (ky+1) * nz + 2]) + c8_2 * (- inM[kxnynz + (ky-1) * nz + 2] + inM[kxnynz + (ky+2) * nz + 2]) + c8_3 * (- inM[kxnynz + (ky-2) * nz + 2] + inM[kxnynz + (ky+3) * nz + 2]) + c8_4 * (- inM[kxnynz + (ky-3) * nz + 2] + inM[kxnynz + (ky+4) * nz + 2]); const Type stencilMDz2 = c8_1 * (- inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 3]) + c8_2 * (- inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 4]) + c8_3 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 5]) + c8_4 * (+ inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 6]); const Type dpdx = invDx * stencilPDx2; const Type dpdy = invDy * stencilPDy2; const Type dpdz = invDz * stencilPDz2; const Type dmdx = invDx * stencilMDx2; const Type dmdy = invDy * stencilMDy2; const Type dmdz = invDz * stencilMDz2; const long k = kx * ny * nz + ky * nz + 2; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; const float sinThetaSinPhi = sinTheta[k] * sinPhi[k]; const Type fieldEta2 = fieldEta[k] * fieldEta[k]; const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k]; const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz; const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz; const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2); const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M; const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M; const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]); const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]); outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP; outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP; outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP; outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM; outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM; outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM; } // kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative { const Type stencilPDx3 = c8_1 * (- inP[(kx+0) * nynz + kynz + 3] + inP[(kx+1) * nynz + kynz + 3]) + c8_2 * (- inP[(kx-1) * nynz + kynz + 3] + inP[(kx+2) * nynz + kynz + 3]) + c8_3 * (- inP[(kx-2) * nynz + kynz + 3] + inP[(kx+3) * nynz + kynz + 3]) + c8_4 * (- inP[(kx-3) * nynz + kynz + 3] + inP[(kx+4) * nynz + kynz + 3]); const Type stencilPDy3 = c8_1 * (- inP[kxnynz + (ky+0) * nz + 3] + inP[kxnynz + (ky+1) * nz + 3]) + c8_2 * (- inP[kxnynz + (ky-1) * nz + 3] + 
inP[kxnynz + (ky+2) * nz + 3]) + c8_3 * (- inP[kxnynz + (ky-2) * nz + 3] + inP[kxnynz + (ky+3) * nz + 3]) + c8_4 * (- inP[kxnynz + (ky-3) * nz + 3] + inP[kxnynz + (ky+4) * nz + 3]); const Type stencilPDz3 = c8_1 * (- inP[kxnynz_kynz + 3] + inP[kxnynz_kynz + 4]) + c8_2 * (- inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 5]) + c8_3 * (- inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 6]) + c8_4 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 7]); const Type stencilMDx3 = c8_1 * (- inM[(kx+0) * nynz + kynz + 3] + inM[(kx+1) * nynz + kynz + 3]) + c8_2 * (- inM[(kx-1) * nynz + kynz + 3] + inM[(kx+2) * nynz + kynz + 3]) + c8_3 * (- inM[(kx-2) * nynz + kynz + 3] + inM[(kx+3) * nynz + kynz + 3]) + c8_4 * (- inM[(kx-3) * nynz + kynz + 3] + inM[(kx+4) * nynz + kynz + 3]); const Type stencilMDy3 = c8_1 * (- inM[kxnynz + (ky+0) * nz + 3] + inM[kxnynz + (ky+1) * nz + 3]) + c8_2 * (- inM[kxnynz + (ky-1) * nz + 3] + inM[kxnynz + (ky+2) * nz + 3]) + c8_3 * (- inM[kxnynz + (ky-2) * nz + 3] + inM[kxnynz + (ky+3) * nz + 3]) + c8_4 * (- inM[kxnynz + (ky-3) * nz + 3] + inM[kxnynz + (ky+4) * nz + 3]); const Type stencilMDz3 = c8_1 * (- inM[kxnynz_kynz + 3] + inM[kxnynz_kynz + 4]) + c8_2 * (- inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 5]) + c8_3 * (- inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 6]) + c8_4 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 7]); const Type dpdx = invDx * stencilPDx3; const Type dpdy = invDy * stencilPDy3; const Type dpdz = invDz * stencilPDz3; const Type dmdx = invDx * stencilMDx3; const Type dmdy = invDy * stencilMDy3; const Type dmdz = invDz * stencilMDz3; const long k = kx * ny * nz + ky * nz + 3; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; const float sinThetaSinPhi = sinTheta[k] * sinPhi[k]; const Type fieldEta2 = fieldEta[k] * fieldEta[k]; const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k]; const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz; const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz; const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2); const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M; const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M; const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]); const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]); outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP; outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP; outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP; outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM; outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM; outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM; } } } } } /** * Combines * applyFirstDerivatives_MinusHalf(P) * secondOrderTimeUpdate_BubeConservation(P) * applyFirstDerivatives_MinusHalf(M) * secondOrderTimeUpdate_BubeConservation(M) * * Updates pOld and mOld with second order time update * see notes in method secondOrderTimeUpdate_BubeConservation() * * Nonlinear method: outputs the spatial derivatives for serialization * Linear method: does not output the spatial derivatives */ template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline static void applyFirstDerivatives3D_MinusHalf_TimeUpdate_Nonlinear( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, const Type dtMod, const Type * __restrict__ const tmpPX, const Type * __restrict__ 
const tmpPY, const Type * __restrict__ const tmpPZ, const Type * __restrict__ const tmpMX, const Type * __restrict__ const tmpMY, const Type * __restrict__ const tmpMZ, const Type * __restrict__ const fieldVel, const Type * __restrict__ const fieldBuoy, const Type * __restrict__ const dtOmegaInvQ, const Type * __restrict__ const pCur, const Type * __restrict__ const mCur, Type * __restrict__ pSpace, Type * __restrict__ mSpace, Type * __restrict__ pOld, Type * __restrict__ mOld, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; const Type dt2 = dtMod * dtMod; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { const long kindex1 = kx * ny * nz + ky * nz + k; const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); pSpace[kindex1] = pSpace[kindex2] = 0; mSpace[kindex1] = mSpace[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = kx * ny * nz + k * nz + kz; const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; pSpace[kindex1] = pSpace[kindex2] = 0; mSpace[kindex1] = mSpace[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = k * ny * nz + ky * nz + kz; const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; pSpace[kindex1] = pSpace[kindex2] = 0; mSpace[kindex1] = mSpace[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long k = kxnynz_kynz + kz; const long kynz_kz = + kynz + kz; const Type stencilDPx = c8_1 * (- tmpPX[(kx-1) * nynz + kynz_kz] + tmpPX[(kx+0) * nynz + kynz_kz]) + c8_2 * (- tmpPX[(kx-2) * nynz + kynz_kz] + tmpPX[(kx+1) * nynz + kynz_kz]) + c8_3 * (- tmpPX[(kx-3) * nynz + kynz_kz] + tmpPX[(kx+2) * nynz + kynz_kz]) + c8_4 * (- tmpPX[(kx-4) * nynz + kynz_kz] + tmpPX[(kx+3) * nynz + kynz_kz]); const Type stencilDPy = c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + kz] + tmpPY[kxnynz + (ky+0) * nz + kz]) + c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + kz] + tmpPY[kxnynz + (ky+1) * nz + kz]) + c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + kz] + tmpPY[kxnynz + (ky+2) * nz + kz]) + c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + kz] + tmpPY[kxnynz + (ky+3) * nz + kz]); const Type stencilDPz = c8_1 * (- tmpPZ[kxnynz_kynz + (kz-1)] + tmpPZ[kxnynz_kynz + (kz+0)]) + c8_2 * (- tmpPZ[kxnynz_kynz + (kz-2)] + tmpPZ[kxnynz_kynz + (kz+1)]) + c8_3 * (- tmpPZ[kxnynz_kynz + (kz-3)] + tmpPZ[kxnynz_kynz + (kz+2)]) + c8_4 * (- tmpPZ[kxnynz_kynz + (kz-4)] + tmpPZ[kxnynz_kynz + (kz+3)]); const Type stencilDMx = c8_1 * (- tmpMX[(kx-1) * nynz + kynz_kz] + tmpMX[(kx+0) * nynz + kynz_kz]) + c8_2 
* (- tmpMX[(kx-2) * nynz + kynz_kz] + tmpMX[(kx+1) * nynz + kynz_kz]) + c8_3 * (- tmpMX[(kx-3) * nynz + kynz_kz] + tmpMX[(kx+2) * nynz + kynz_kz]) + c8_4 * (- tmpMX[(kx-4) * nynz + kynz_kz] + tmpMX[(kx+3) * nynz + kynz_kz]); const Type stencilDMy = c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + kz] + tmpMY[kxnynz + (ky+0) * nz + kz]) + c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + kz] + tmpMY[kxnynz + (ky+1) * nz + kz]) + c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + kz] + tmpMY[kxnynz + (ky+2) * nz + kz]) + c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + kz] + tmpMY[kxnynz + (ky+3) * nz + kz]); const Type stencilDMz = c8_1 * (- tmpMZ[kxnynz_kynz + (kz-1)] + tmpMZ[kxnynz_kynz + (kz+0)]) + c8_2 * (- tmpMZ[kxnynz_kynz + (kz-2)] + tmpMZ[kxnynz_kynz + (kz+1)]) + c8_3 * (- tmpMZ[kxnynz_kynz + (kz-3)] + tmpMZ[kxnynz_kynz + (kz+2)]) + c8_4 * (- tmpMZ[kxnynz_kynz + (kz-4)] + tmpMZ[kxnynz_kynz + (kz+3)]); const Type dPx = invDx * stencilDPx; const Type dPy = invDy * stencilDPy; const Type dPz = invDz * stencilDPz; const Type dMx = invDx * stencilDMx; const Type dMy = invDy * stencilDMy; const Type dMz = invDz * stencilDMz; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; pSpace[k] = dPx + dPy + dPz; mSpace[k] = dMx + dMy + dMz; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- at the free surface -- p = 0 // [kxnynz_kynz + 0] { const Type dPx = 0; const Type dPy = 0; const Type dPz = 0; const Type dMx = 0; const Type dMy = 0; const Type dMz = 0; const long k = kxnynz_kynz + 0; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * (dMx + dMy + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; pSpace[k] = dPx + dPy + dPz; mSpace[k] = dMx + dMy + dMz; } // kz = 1 -- one cell below the free surface // [kxnynz_kynz + 1] { const Type stencilDPx1 = c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 1] + tmpPX[(kx+0) * nynz + kynz + 1]) + c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 1] + tmpPX[(kx+1) * nynz + kynz + 1]) + c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 1] + tmpPX[(kx+2) * nynz + kynz + 1]) + c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 1] + tmpPX[(kx+3) * nynz + kynz + 1]); const Type stencilDPy1 = c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 1] + tmpPY[kxnynz + (ky+0) * nz + 1]) + c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 1] + tmpPY[kxnynz + (ky+1) * nz + 1]) + c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 1] + tmpPY[kxnynz + (ky+2) * nz + 1]) + c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 1] + tmpPY[kxnynz + (ky+3) * nz + 1]); const Type stencilDPz1 = c8_1 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 1]) + c8_2 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 2]) + c8_3 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 3]) + c8_4 * (- tmpPZ[kxnynz_kynz + 2] + tmpPZ[kxnynz_kynz + 4]); const Type stencilDMx1 = c8_1 * (- tmpMX[(kx-1) * nynz + kynz + 1] + tmpMX[(kx+0) * nynz + kynz + 1]) + c8_2 * (- tmpMX[(kx-2) * nynz + kynz + 1] + tmpMX[(kx+1) * nynz + kynz + 1]) + c8_3 * (- tmpMX[(kx-3) * nynz + kynz + 1] + tmpMX[(kx+2) * nynz + kynz + 1]) + c8_4 
* (- tmpMX[(kx-4) * nynz + kynz + 1] + tmpMX[(kx+3) * nynz + kynz + 1]); const Type stencilDMy1 = c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + 1] + tmpMY[kxnynz + (ky+0) * nz + 1]) + c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + 1] + tmpMY[kxnynz + (ky+1) * nz + 1]) + c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + 1] + tmpMY[kxnynz + (ky+2) * nz + 1]) + c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + 1] + tmpMY[kxnynz + (ky+3) * nz + 1]); const Type stencilDMz1 = c8_1 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 1]) + c8_2 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 2]) + c8_3 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 3]) + c8_4 * (- tmpMZ[kxnynz_kynz + 2] + tmpMZ[kxnynz_kynz + 4]); const Type dPx = invDx * stencilDPx1; const Type dPy = invDy * stencilDPy1; const Type dPz = invDz * stencilDPz1; const Type dMx = invDx * stencilDMx1; const Type dMy = invDy * stencilDMy1; const Type dMz = invDz * stencilDMz1; const long k = kxnynz_kynz + 1; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; pSpace[k] = dPx + dPy + dPz; mSpace[k] = dMx + dMy + dMz; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } // kz = 2 -- two cells below the free surface // [kxnynz_kynz + 2] { const Type stencilDPx2 = c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 2] + tmpPX[(kx+0) * nynz + kynz + 2]) + c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 2] + tmpPX[(kx+1) * nynz + kynz + 2]) + c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 2] + tmpPX[(kx+2) * nynz + kynz + 2]) + c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 2] + tmpPX[(kx+3) * nynz + kynz + 2]); const Type stencilDPy2 = c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 2] + tmpPY[kxnynz + (ky+0) * nz + 2]) + c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 2] + tmpPY[kxnynz + (ky+1) * nz + 2]) + c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 2] + tmpPY[kxnynz + (ky+2) * nz + 2]) + c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 2] + tmpPY[kxnynz + (ky+3) * nz + 2]); const Type stencilDPz2 = c8_1 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 2]) + c8_2 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 3]) + c8_3 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 4]) + c8_4 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 5]); const Type stencilDMx2 = c8_1 * (- tmpMX[(kx-1) * nynz + kynz + 2] + tmpMX[(kx+0) * nynz + kynz + 2]) + c8_2 * (- tmpMX[(kx-2) * nynz + kynz + 2] + tmpMX[(kx+1) * nynz + kynz + 2]) + c8_3 * (- tmpMX[(kx-3) * nynz + kynz + 2] + tmpMX[(kx+2) * nynz + kynz + 2]) + c8_4 * (- tmpMX[(kx-4) * nynz + kynz + 2] + tmpMX[(kx+3) * nynz + kynz + 2]); const Type stencilDMy2 = c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + 2] + tmpMY[kxnynz + (ky+0) * nz + 2]) + c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + 2] + tmpMY[kxnynz + (ky+1) * nz + 2]) + c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + 2] + tmpMY[kxnynz + (ky+2) * nz + 2]) + c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + 2] + tmpMY[kxnynz + (ky+3) * nz + 2]); const Type stencilDMz2 = c8_1 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 2]) + c8_2 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 3]) + c8_3 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 4]) + c8_4 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 5]); const Type dPx = invDx * stencilDPx2; const Type dPy = invDy * stencilDPy2; const Type dPz = invDz * stencilDPz2; const Type dMx = invDx * stencilDMx2; const Type dMy = invDy * stencilDMy2; const Type dMz = invDz * stencilDMz2; const long k = kxnynz_kynz + 2; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; 
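// Annotation: this is the standard second-order-in-time update with a Q-absorption
// term, matching the interior update; pOld is overwritten in place with the next
// time level:
//   p_next = 2*pCur - pOld + dt^2 * v^2 / buoy * pSpace - dtOmegaInvQ * (pCur - pOld)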
pSpace[k] = dPx + dPy + dPz; mSpace[k] = dMx + dMy + dMz; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } // kz = 3 -- three cells below the free surface // [kxnynz_kynz + 3] { const Type stencilDPx3 = c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 3] + tmpPX[(kx+0) * nynz + kynz + 3]) + c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 3] + tmpPX[(kx+1) * nynz + kynz + 3]) + c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 3] + tmpPX[(kx+2) * nynz + kynz + 3]) + c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 3] + tmpPX[(kx+3) * nynz + kynz + 3]); const Type stencilDPy3 = c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 3] + tmpPY[kxnynz + (ky+0) * nz + 3]) + c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 3] + tmpPY[kxnynz + (ky+1) * nz + 3]) + c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 3] + tmpPY[kxnynz + (ky+2) * nz + 3]) + c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 3] + tmpPY[kxnynz + (ky+3) * nz + 3]); const Type stencilDPz3 = c8_1 * (- tmpPZ[kxnynz_kynz + 2] + tmpPZ[kxnynz_kynz + 3]) + c8_2 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 4]) + c8_3 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 5]) + c8_4 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 6]); const Type stencilDMx3 = c8_1 * (- tmpMX[(kx-1) * nynz + kynz + 3] + tmpMX[(kx+0) * nynz + kynz + 3]) + c8_2 * (- tmpMX[(kx-2) * nynz + kynz + 3] + tmpMX[(kx+1) * nynz + kynz + 3]) + c8_3 * (- tmpMX[(kx-3) * nynz + kynz + 3] + tmpMX[(kx+2) * nynz + kynz + 3]) + c8_4 * (- tmpMX[(kx-4) * nynz + kynz + 3] + tmpMX[(kx+3) * nynz + kynz + 3]); const Type stencilDMy3 = c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + 3] + tmpMY[kxnynz + (ky+0) * nz + 3]) + c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + 3] + tmpMY[kxnynz + (ky+1) * nz + 3]) + c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + 3] + tmpMY[kxnynz + (ky+2) * nz + 3]) + c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + 3] + tmpMY[kxnynz + (ky+3) * nz + 3]); const Type stencilDMz3 = c8_1 * (- tmpMZ[kxnynz_kynz + 2] + tmpMZ[kxnynz_kynz + 3]) + c8_2 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 4]) + c8_3 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 5]) + c8_4 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 6]); const Type dPx = invDx * stencilDPx3; const Type dPy = invDy * stencilDPy3; const Type dPz = invDz * stencilDPz3; const Type dMx = invDx * stencilDMx3; const Type dMy = invDy * stencilDMy3; const Type dMz = invDz * stencilDMz3; const long k = kxnynz_kynz + 3; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; pSpace[k] = dPx + dPy + dPz; mSpace[k] = dMx + dMy + dMz; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline static void applyFirstDerivatives3D_TTI_PlusHalf_Sandwich( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, Type * __restrict__ inP_G1, Type * __restrict__ inP_G2, Type * __restrict__ inP_G3, Type * __restrict__ inM_G1, Type * __restrict__ inM_G2, Type * __restrict__ inM_G3, Type * __restrict__ fieldEps, Type * __restrict__ fieldEta, Type * __restrict__ fieldVsVp, Type * __restrict__ fieldBuoy, float * __restrict__ sinTheta, float * __restrict__ cosTheta, 
float * __restrict__ sinPhi, float * __restrict__ cosPhi, Type * __restrict__ outP_G1, Type * __restrict__ outP_G2, Type * __restrict__ outP_G3, Type * __restrict__ outM_G1, Type * __restrict__ outM_G2, Type * __restrict__ outM_G3, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { long kindex1 = kx * ny * nz + ky * nz + k; long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); outP_G1[kindex1] = outP_G1[kindex2] = 0; outP_G2[kindex1] = outP_G2[kindex2] = 0; outP_G3[kindex1] = outP_G3[kindex2] = 0; outM_G1[kindex1] = outM_G1[kindex2] = 0; outM_G2[kindex1] = outM_G2[kindex2] = 0; outM_G3[kindex1] = outM_G3[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = kx * ny * nz + k * nz + kz; long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; outP_G1[kindex1] = outP_G1[kindex2] = 0; outP_G2[kindex1] = outP_G2[kindex2] = 0; outP_G3[kindex1] = outP_G3[kindex2] = 0; outM_G1[kindex1] = outM_G1[kindex2] = 0; outM_G2[kindex1] = outM_G2[kindex2] = 0; outM_G3[kindex1] = outM_G3[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = k * ny * nz + ky * nz + kz; long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; outP_G1[kindex1] = outP_G1[kindex2] = 0; outP_G2[kindex1] = outP_G2[kindex2] = 0; outP_G3[kindex1] = outP_G3[kindex2] = 0; outM_G1[kindex1] = outM_G1[kindex2] = 0; outM_G2[kindex1] = outM_G2[kindex2] = 0; outM_G3[kindex1] = outM_G3[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const Type stencilP_G1 = c8_1 * (- inP_G1[(kx+0) * nynz + kynz_kz] + inP_G1[(kx+1) * nynz + kynz_kz]) + c8_2 * (- inP_G1[(kx-1) * nynz + kynz_kz] + inP_G1[(kx+2) * nynz + kynz_kz]) + c8_3 * (- inP_G1[(kx-2) * nynz + kynz_kz] + inP_G1[(kx+3) * nynz + kynz_kz]) + c8_4 * (- inP_G1[(kx-3) * nynz + kynz_kz] + inP_G1[(kx+4) * nynz + kynz_kz]); const Type stencilP_G2 = c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + kz] + inP_G2[kxnynz + (ky+1) * nz + kz]) + c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + kz] + inP_G2[kxnynz + (ky+2) * nz + kz]) + c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + kz] + inP_G2[kxnynz + (ky+3) * nz + kz]) + c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + kz] + inP_G2[kxnynz + (ky+4) * nz + kz]); const Type stencilP_G3 = c8_1 * (- inP_G3[kxnynz_kynz + (kz+0)] + inP_G3[kxnynz_kynz + (kz+1)]) + c8_2 * (- inP_G3[kxnynz_kynz + (kz-1)] + inP_G3[kxnynz_kynz + (kz+2)]) + c8_3 * (- inP_G3[kxnynz_kynz + (kz-2)] + inP_G3[kxnynz_kynz + (kz+3)]) 
+ c8_4 * (- inP_G3[kxnynz_kynz + (kz-3)] + inP_G3[kxnynz_kynz + (kz+4)]); const Type stencilM_G1 = c8_1 * (- inM_G1[(kx+0) * nynz + kynz_kz] + inM_G1[(kx+1) * nynz + kynz_kz]) + c8_2 * (- inM_G1[(kx-1) * nynz + kynz_kz] + inM_G1[(kx+2) * nynz + kynz_kz]) + c8_3 * (- inM_G1[(kx-2) * nynz + kynz_kz] + inM_G1[(kx+3) * nynz + kynz_kz]) + c8_4 * (- inM_G1[(kx-3) * nynz + kynz_kz] + inM_G1[(kx+4) * nynz + kynz_kz]); const Type stencilM_G2 = c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + kz] + inM_G2[kxnynz + (ky+1) * nz + kz]) + c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + kz] + inM_G2[kxnynz + (ky+2) * nz + kz]) + c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + kz] + inM_G2[kxnynz + (ky+3) * nz + kz]) + c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + kz] + inM_G2[kxnynz + (ky+4) * nz + kz]); const Type stencilM_G3 = c8_1 * (- inM_G3[kxnynz_kynz + (kz+0)] + inM_G3[kxnynz_kynz + (kz+1)]) + c8_2 * (- inM_G3[kxnynz_kynz + (kz-1)] + inM_G3[kxnynz_kynz + (kz+2)]) + c8_3 * (- inM_G3[kxnynz_kynz + (kz-2)] + inM_G3[kxnynz_kynz + (kz+3)]) + c8_4 * (- inM_G3[kxnynz_kynz + (kz-3)] + inM_G3[kxnynz_kynz + (kz+4)]); const Type dpx = invDx * stencilP_G1; const Type dpy = invDy * stencilP_G2; const Type dpz = invDz * stencilP_G3; const Type dmx = invDx * stencilM_G1; const Type dmy = invDy * stencilM_G2; const Type dmz = invDz * stencilM_G3; long k = kxnynz_kynz + kz; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; const Type SA2 = sqrt(1 - A * A); const float cosThetaCosPhi = cosTheta[k] * cosPhi[k]; const float cosThetaSinPhi = cosTheta[k] * sinPhi[k]; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz; Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy; Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz; Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz; Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy; Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz; // combine terms for application of adjoint g3 outP_G1[k] = B * E * dPg1; outP_G2[k] = B * E * dPg2; outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3; outM_G1[k] = B * (1 - F) * dMg1; outM_G2[k] = B * (1 - F) * dMg2; outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative // X and Y derivatives are identically zero { const Type stencilP_G3 = c8_1 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 1]) + c8_2 * (+ inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 2]) + c8_3 * (+ inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 3]) + c8_4 * (+ inP_G3[kxnynz_kynz + 3] + inP_G3[kxnynz_kynz + 4]); const Type stencilM_G3 = c8_1 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 1]) + c8_2 * (+ inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 2]) + c8_3 * (+ inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 3]) + c8_4 * (+ inM_G3[kxnynz_kynz + 3] + inM_G3[kxnynz_kynz + 4]); const Type dpx = 0; const Type dpy = 0; const Type dpz = invDz * stencilP_G3; const Type dmx = 0; const Type dmy = 0; const Type dmz = invDz * stencilM_G3; const long k = 
kxnynz_kynz + 0;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz;
Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy;
Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz;
Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}

// kz = 1 -- 1 1/2 cells below free surface for Z derivative, one cell below for X/Y derivative
{
const Type stencilP_G11 =
    c8_1 * (- inP_G1[(kx+0) * nynz + kynz + 1] + inP_G1[(kx+1) * nynz + kynz + 1]) +
    c8_2 * (- inP_G1[(kx-1) * nynz + kynz + 1] + inP_G1[(kx+2) * nynz + kynz + 1]) +
    c8_3 * (- inP_G1[(kx-2) * nynz + kynz + 1] + inP_G1[(kx+3) * nynz + kynz + 1]) +
    c8_4 * (- inP_G1[(kx-3) * nynz + kynz + 1] + inP_G1[(kx+4) * nynz + kynz + 1]);
const Type stencilP_G21 =
    c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + 1] + inP_G2[kxnynz + (ky+1) * nz + 1]) +
    c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + 1] + inP_G2[kxnynz + (ky+2) * nz + 1]) +
    c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + 1] + inP_G2[kxnynz + (ky+3) * nz + 1]) +
    c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + 1] + inP_G2[kxnynz + (ky+4) * nz + 1]);
const Type stencilP_G31 =
    c8_1 * (- inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 2]) +
    c8_2 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 3]) +
    c8_3 * (+ inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 4]) +
    c8_4 * (+ inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 5]);
const Type stencilM_G11 =
    c8_1 * (- inM_G1[(kx+0) * nynz + kynz + 1] + inM_G1[(kx+1) * nynz + kynz + 1]) +
    c8_2 * (- inM_G1[(kx-1) * nynz + kynz + 1] + inM_G1[(kx+2) * nynz + kynz + 1]) +
    c8_3 * (- inM_G1[(kx-2) * nynz + kynz + 1] + inM_G1[(kx+3) * nynz + kynz + 1]) +
    c8_4 * (- inM_G1[(kx-3) * nynz + kynz + 1] + inM_G1[(kx+4) * nynz + kynz + 1]);
const Type stencilM_G21 =
    c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + 1] + inM_G2[kxnynz + (ky+1) * nz + 1]) +
    c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + 1] + inM_G2[kxnynz + (ky+2) * nz + 1]) +
    c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + 1] + inM_G2[kxnynz + (ky+3) * nz + 1]) +
    c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + 1] + inM_G2[kxnynz + (ky+4) * nz + 1]);
const Type stencilM_G31 =
    c8_1 * (- inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 2]) +
    c8_2 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 3]) +
    c8_3 * (+ inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 4]) +
    c8_4 * (+ inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 5]);
const Type dpx = invDx * stencilP_G11;
const Type dpy = invDy * stencilP_G21;
const Type dpz = invDz * stencilP_G31;
const Type dmx = invDx * stencilM_G11;
const Type dmy = invDy * stencilM_G21;
const Type dmz = invDz * stencilM_G31;
const long k = kxnynz_kynz + 1;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float
cosThetaCosPhi = cosTheta[k] * cosPhi[k]; const float cosThetaSinPhi = cosTheta[k] * sinPhi[k]; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz; Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy; Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz; Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz; Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy; Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz; // combine terms for application of adjoint g3 outP_G1[k] = B * E * dPg1; outP_G2[k] = B * E * dPg2; outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3; outM_G1[k] = B * (1 - F) * dMg1; outM_G2[k] = B * (1 - F) * dMg2; outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3; } // kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative { const Type stencilP_G12 = c8_1 * (- inP_G1[(kx+0) * nynz + kynz + 2] + inP_G1[(kx+1) * nynz + kynz + 2]) + c8_2 * (- inP_G1[(kx-1) * nynz + kynz + 2] + inP_G1[(kx+2) * nynz + kynz + 2]) + c8_3 * (- inP_G1[(kx-2) * nynz + kynz + 2] + inP_G1[(kx+3) * nynz + kynz + 2]) + c8_4 * (- inP_G1[(kx-3) * nynz + kynz + 2] + inP_G1[(kx+4) * nynz + kynz + 2]); const Type stencilP_G22 = c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + 2] + inP_G2[kxnynz + (ky+1) * nz + 2]) + c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + 2] + inP_G2[kxnynz + (ky+2) * nz + 2]) + c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + 2] + inP_G2[kxnynz + (ky+3) * nz + 2]) + c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + 2] + inP_G2[kxnynz + (ky+4) * nz + 2]); const Type stencilP_G32 = c8_1 * (- inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 3]) + c8_2 * (- inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 4]) + c8_3 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 5]) + c8_4 * (+ inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 6]); const Type stencilM_G12 = c8_1 * (- inM_G1[(kx+0) * nynz + kynz + 2] + inM_G1[(kx+1) * nynz + kynz + 2]) + c8_2 * (- inM_G1[(kx-1) * nynz + kynz + 2] + inM_G1[(kx+2) * nynz + kynz + 2]) + c8_3 * (- inM_G1[(kx-2) * nynz + kynz + 2] + inM_G1[(kx+3) * nynz + kynz + 2]) + c8_4 * (- inM_G1[(kx-3) * nynz + kynz + 2] + inM_G1[(kx+4) * nynz + kynz + 2]); const Type stencilM_G22 = c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + 2] + inM_G2[kxnynz + (ky+1) * nz + 2]) + c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + 2] + inM_G2[kxnynz + (ky+2) * nz + 2]) + c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + 2] + inM_G2[kxnynz + (ky+3) * nz + 2]) + c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + 2] + inM_G2[kxnynz + (ky+4) * nz + 2]); const Type stencilM_G32 = c8_1 * (- inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 3]) + c8_2 * (- inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 4]) + c8_3 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 5]) + c8_4 * (+ inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 6]); const Type dpx = invDx * stencilP_G12; const Type dpy = invDy * stencilP_G22; const Type dpz = invDz * stencilP_G32; const Type dmx = invDx * stencilM_G12; const Type dmy = invDy * stencilM_G22; const Type dmz = invDz * stencilM_G32; const long k = kxnynz_kynz + 2; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; const Type SA2 = sqrt(1 - A * A); const float cosThetaCosPhi = cosTheta[k] * cosPhi[k]; const float cosThetaSinPhi = cosTheta[k] * sinPhi[k]; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; Type dPg1 = cosThetaCosPhi * dpx + 
cosThetaSinPhi * dpy - sinTheta[k] * dpz; Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy; Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz; Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz; Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy; Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz; // combine terms for application of adjoint g3 outP_G1[k] = B * E * dPg1; outP_G2[k] = B * E * dPg2; outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3; outM_G1[k] = B * (1 - F) * dMg1; outM_G2[k] = B * (1 - F) * dMg2; outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3; } // kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative { const Type stencilP_G13 = c8_1 * (- inP_G1[(kx+0) * nynz + kynz + 3] + inP_G1[(kx+1) * nynz + kynz + 3]) + c8_2 * (- inP_G1[(kx-1) * nynz + kynz + 3] + inP_G1[(kx+2) * nynz + kynz + 3]) + c8_3 * (- inP_G1[(kx-2) * nynz + kynz + 3] + inP_G1[(kx+3) * nynz + kynz + 3]) + c8_4 * (- inP_G1[(kx-3) * nynz + kynz + 3] + inP_G1[(kx+4) * nynz + kynz + 3]); const Type stencilP_G23 = c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + 3] + inP_G2[kxnynz + (ky+1) * nz + 3]) + c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + 3] + inP_G2[kxnynz + (ky+2) * nz + 3]) + c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + 3] + inP_G2[kxnynz + (ky+3) * nz + 3]) + c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + 3] + inP_G2[kxnynz + (ky+4) * nz + 3]); const Type stencilP_G33 = c8_1 * (- inP_G3[kxnynz_kynz + 3] + inP_G3[kxnynz_kynz + 4]) + c8_2 * (- inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 5]) + c8_3 * (- inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 6]) + c8_4 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 7]); const Type stencilM_G13 = c8_1 * (- inM_G1[(kx+0) * nynz + kynz + 3] + inM_G1[(kx+1) * nynz + kynz + 3]) + c8_2 * (- inM_G1[(kx-1) * nynz + kynz + 3] + inM_G1[(kx+2) * nynz + kynz + 3]) + c8_3 * (- inM_G1[(kx-2) * nynz + kynz + 3] + inM_G1[(kx+3) * nynz + kynz + 3]) + c8_4 * (- inM_G1[(kx-3) * nynz + kynz + 3] + inM_G1[(kx+4) * nynz + kynz + 3]); const Type stencilM_G23 = c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + 3] + inM_G2[kxnynz + (ky+1) * nz + 3]) + c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + 3] + inM_G2[kxnynz + (ky+2) * nz + 3]) + c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + 3] + inM_G2[kxnynz + (ky+3) * nz + 3]) + c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + 3] + inM_G2[kxnynz + (ky+4) * nz + 3]); const Type stencilM_G33 = c8_1 * (- inM_G3[kxnynz_kynz + 3] + inM_G3[kxnynz_kynz + 4]) + c8_2 * (- inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 5]) + c8_3 * (- inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 6]) + c8_4 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 7]); const Type dpx = invDx * stencilP_G13; const Type dpy = invDy * stencilP_G23; const Type dpz = invDz * stencilP_G33; const Type dmx = invDx * stencilM_G13; const Type dmy = invDy * stencilM_G23; const Type dmz = invDz * stencilM_G33; const long k = kxnynz_kynz + 3; const Type E = 1 + 2 * fieldEps[k]; const Type A = fieldEta[k]; const Type F = fieldVsVp[k]; const Type B = fieldBuoy[k]; const Type SA2 = sqrt(1 - A * A); const float cosThetaCosPhi = cosTheta[k] * cosPhi[k]; const float cosThetaSinPhi = cosTheta[k] * sinPhi[k]; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz; Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy; Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz; Type dMg1 = 
cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}
}
}
}
}

template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives3D_TTI_MinusHalf_TimeUpdate_Nonlinear(
        const long freeSurface, const long nx, const long ny, const long nz, const long nthread,
        const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4,
        const Type invDx, const Type invDy, const Type invDz, const Type dt,
        Type * __restrict__ inP_G1, Type * __restrict__ inP_G2, Type * __restrict__ inP_G3,
        Type * __restrict__ inM_G1, Type * __restrict__ inM_G2, Type * __restrict__ inM_G3,
        Type * __restrict__ fieldVel, Type * __restrict__ fieldBuoy, Type * __restrict__ dtOmegaInvQ,
        float * __restrict__ sinTheta, float * __restrict__ cosTheta,
        float * __restrict__ sinPhi, float * __restrict__ cosPhi,
        Type * __restrict__ pCur, Type * __restrict__ mCur,
        Type * __restrict__ pSpace, Type * __restrict__ mSpace,
        Type * __restrict__ pOld, Type * __restrict__ mOld,
        const long BX_3D, const long BY_3D, const long BZ_3D) {

    const long nx4 = nx - 4;
    const long ny4 = ny - 4;
    const long nz4 = nz - 4;
    const long nynz = ny * nz;
    const Type dt2 = dt * dt;

    // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed
    for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
        for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
            for (long ky = 0; ky < ny; ky++) {
                const long kindex1 = kx * ny * nz + ky * nz + k;
                const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
                pSpace[kindex1] = pSpace[kindex2] = 0;
                mSpace[kindex1] = mSpace[kindex2] = 0;
            }
        }
#pragma omp parallel for num_threads(nthread) schedule(static)
        for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
            for (long kz = 0; kz < nz; kz++) {
                const long kindex1 = kx * ny * nz + k * nz + kz;
                const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
                pSpace[kindex1] = pSpace[kindex2] = 0;
                mSpace[kindex1] = mSpace[kindex2] = 0;
            }
        }
#pragma omp parallel for num_threads(nthread) schedule(static)
        for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
            for (long kz = 0; kz < nz; kz++) {
                const long kindex1 = k * ny * nz + ky * nz + kz;
                const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
                pSpace[kindex1] = pSpace[kindex2] = 0;
                mSpace[kindex1] = mSpace[kindex2] = 0;
            }
        }
    }

    // interior
    // TODO -- this does significantly more compute than John's latest implementation.
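    // Annotation: this kernel folds the TTI rotation into the derivative stencils
    // themselves -- sinTheta/cosTheta/sinPhi/cosPhi are re-sampled at every stencil
    // tap rather than once per output point, which is presumably the extra compute
    // the TODO above refers to.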
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + (kz-4); const long kzm3 = kxnynz_kynz + (kz-3); const long kzm2 = kxnynz_kynz + (kz-2); const long kzm1 = kxnynz_kynz + (kz-1); const long kzp0 = kxnynz_kynz + (kz+0); const long kzp1 = kxnynz_kynz + (kz+1); const long kzp2 = kxnynz_kynz + (kz+2); const long kzp3 = kxnynz_kynz + (kz+3); // ........................ G1 ........................ const Type stencilP_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]); const Type stencilP_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]); const Type stencilP_G1C = c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]); // ........................ G2 ........................ 
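// Annotation: the rotated G2 direction is purely horizontal (azimuthal), so it has
// no z component; only the x- and y-stencils are formed, combined further down as
// dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B.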
const Type stencilP_G2A = c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]); const Type stencilP_G2B = c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]); // ........................ G3 ........................ const Type stencilP_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]); const Type stencilP_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]); const Type stencilP_G3C = c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]); // ........................ G1 ........................ const Type stencilM_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]); const Type stencilM_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]); const Type stencilM_G1C = c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]); // ........................ G2 ........................ 
const Type stencilM_G2A = c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]); const Type stencilM_G2B = c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]); // ........................ G3 ........................ const Type stencilM_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]); const Type stencilM_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]); const Type stencilM_G3C = c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]); const long k = kxnynz_kynz + kz; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C; const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B; const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C; const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C; const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B; const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C; pSpace[k] = dpg1 + dpg2 + dpg3; mSpace[k] = dmg1 + dmg2 + dmg3; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- at the free surface -- p = 0, dp = 0 { const Type dpg1 = 0; const Type dpg2 = 0; const Type dpg3 = 0; const Type dmg1 = 0; const Type dmg2 = 0; const Type dmg3 = 0; const long k = kxnynz_kynz + 0; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; pSpace[k] = dpg1 + dpg2 + dpg3; mSpace[k] = dmg1 + dmg2 + dmg3; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } 
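// Annotation: near the free surface, z-stencil taps that would reach above the
// surface are reflected back into the grid -- a tap at z = -m reuses the sample
// (and its rotation weight) at z = m - 1, i.e. a mirror about the z = -1/2
// staggering plane with no sign change. The kzm* index definitions in the
// kz = 1..3 blocks below appear to encode exactly this mapping.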
// kz = 1 -- one cell below the free surface { const long kz = 1; const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + 2; const long kzm3 = kxnynz_kynz + 1; const long kzm2 = kxnynz_kynz + 0; const long kzm1 = kxnynz_kynz + 0; const long kzp0 = kxnynz_kynz + 1; const long kzp1 = kxnynz_kynz + 2; const long kzp2 = kxnynz_kynz + 3; const long kzp3 = kxnynz_kynz + 4; // ........................ G1 ........................ const Type stencilP_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]); const Type stencilP_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]); const Type stencilP_G1C = c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]); // ........................ G2 ........................ const Type stencilP_G2A = c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]); const Type stencilP_G2B = c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]); // ........................ G3 ........................ 
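// Annotation: both rotated directions with a vertical component take their
// z-derivatives through the mirrored kz* indices defined above -- G1 weights
// those taps by sinTheta, G3 by cosTheta.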
const Type stencilP_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]); const Type stencilP_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]); const Type stencilP_G3C = c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]); // ........................ G1 ........................ const Type stencilM_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]); const Type stencilM_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]); const Type stencilM_G1C = c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]); // ........................ G2 ........................ const Type stencilM_G2A = c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]); const Type stencilM_G2B = c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]); // ........................ G3 ........................ 
const Type stencilM_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]); const Type stencilM_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]); const Type stencilM_G3C = c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]); const long k = kxnynz_kynz + 1; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C; const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B; const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C; const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C; const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B; const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C; pSpace[k] = dpg1 + dpg2 + dpg3; mSpace[k] = dmg1 + dmg2 + dmg3; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } // kz = 2 -- two cells below the free surface { const long kz = 2; const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + 1; const long kzm3 = kxnynz_kynz + 0; const long kzm2 = kxnynz_kynz + 0; const long kzm1 = kxnynz_kynz + 1; const long kzp0 = kxnynz_kynz + 2; const long kzp1 = kxnynz_kynz + 3; const long kzp2 = kxnynz_kynz + 4; const long kzp3 = kxnynz_kynz + 5; // ........................ G1 ........................ 
const Type stencilP_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]); const Type stencilP_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]); const Type stencilP_G1C = c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]); // ........................ G2 ........................ const Type stencilP_G2A = c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]); const Type stencilP_G2B = c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]); // ........................ G3 ........................ const Type stencilP_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]); const Type stencilP_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]); const Type stencilP_G3C = c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]); // ........................ G1 ........................ 
const Type stencilM_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]); const Type stencilM_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]); const Type stencilM_G1C = c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]); // ........................ G2 ........................ const Type stencilM_G2A = c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]); const Type stencilM_G2B = c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]); // ........................ G3 ........................ 
const Type stencilM_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]); const Type stencilM_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]); const Type stencilM_G3C = c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]); const long k = kxnynz_kynz + 2; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C; const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B; const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C; const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C; const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B; const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C; pSpace[k] = dpg1 + dpg2 + dpg3; mSpace[k] = dmg1 + dmg2 + dmg3; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } // kz = 3 -- three cells below the free surface { const long kz = 3; const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + 0; const long kzm3 = kxnynz_kynz + 0; const long kzm2 = kxnynz_kynz + 1; const long kzm1 = kxnynz_kynz + 2; const long kzp0 = kxnynz_kynz + 3; const long kzp1 = kxnynz_kynz + 4; const long kzp2 = kxnynz_kynz + 5; const long kzp3 = kxnynz_kynz + 6; // ........................ G1 ........................ 
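// Annotation: kz = 3 is the deepest level that needs mirrored taps -- the
// 8-point stencils reach four cells back (c8_4), so from kz = 4 onward the
// interior block path above applies unmodified.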
const Type stencilP_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]); const Type stencilP_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]); const Type stencilP_G1C = c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]); // ........................ G2 ........................ const Type stencilP_G2A = c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]); const Type stencilP_G2B = c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]); // ........................ G3 ........................ const Type stencilP_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]); const Type stencilP_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]); const Type stencilP_G3C = c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]); // ........................ G1 ........................ 
const Type stencilM_G1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]); const Type stencilM_G1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]); const Type stencilM_G1C = c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]); // ........................ G2 ........................ const Type stencilM_G2A = c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]); const Type stencilM_G2B = c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) + c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) + c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) + c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]); // ........................ G3 ........................ 
const Type stencilM_G3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]); const Type stencilM_G3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]); const Type stencilM_G3C = c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]); const long k = kxnynz_kynz + 3; const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k]; const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C; const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B; const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C; const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C; const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B; const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C; pSpace[k] = dpg1 + dpg2 + dpg3; mSpace[k] = dmg1 + dmg2 + dmg3; pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k]; mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k]; } } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline static void applyFirstDerivatives3D_TTI_PlusHalf( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, Type * __restrict__ inG1, Type * __restrict__ inG2, Type * __restrict__ inG3, float * __restrict__ sinTheta, float * __restrict__ cosTheta, float * __restrict__ sinPhi, float * __restrict__ cosPhi, Type * __restrict__ outG1, Type * __restrict__ outG2, Type * __restrict__ outG3, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { long kindex1 = kx * ny * nz + ky * nz + k; long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); outG1[kindex1] = outG1[kindex2] = 0; outG2[kindex1] = outG2[kindex2] = 0; outG3[kindex1] = outG3[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = kx * ny * nz + k 
* nz + kz; long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; outG1[kindex1] = outG1[kindex2] = 0; outG2[kindex1] = outG2[kindex2] = 0; outG3[kindex1] = outG3[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = k * ny * nz + ky * nz + kz; long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; outG1[kindex1] = outG1[kindex2] = 0; outG2[kindex1] = outG2[kindex2] = 0; outG3[kindex1] = outG3[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const Type stencilG1 = c8_1 * (- inG1[(kx+0) * nynz + kynz_kz] + inG1[(kx+1) * nynz + kynz_kz]) + c8_2 * (- inG1[(kx-1) * nynz + kynz_kz] + inG1[(kx+2) * nynz + kynz_kz]) + c8_3 * (- inG1[(kx-2) * nynz + kynz_kz] + inG1[(kx+3) * nynz + kynz_kz]) + c8_4 * (- inG1[(kx-3) * nynz + kynz_kz] + inG1[(kx+4) * nynz + kynz_kz]); const Type stencilG2 = c8_1 * (- inG2[kxnynz + (ky+0) * nz + kz] + inG2[kxnynz + (ky+1) * nz + kz]) + c8_2 * (- inG2[kxnynz + (ky-1) * nz + kz] + inG2[kxnynz + (ky+2) * nz + kz]) + c8_3 * (- inG2[kxnynz + (ky-2) * nz + kz] + inG2[kxnynz + (ky+3) * nz + kz]) + c8_4 * (- inG2[kxnynz + (ky-3) * nz + kz] + inG2[kxnynz + (ky+4) * nz + kz]); const Type stencilG3 = c8_1 * (- inG3[kxnynz_kynz + (kz+0)] + inG3[kxnynz_kynz + (kz+1)]) + c8_2 * (- inG3[kxnynz_kynz + (kz-1)] + inG3[kxnynz_kynz + (kz+2)]) + c8_3 * (- inG3[kxnynz_kynz + (kz-2)] + inG3[kxnynz_kynz + (kz+3)]) + c8_4 * (- inG3[kxnynz_kynz + (kz-3)] + inG3[kxnynz_kynz + (kz+4)]); long k = kxnynz_kynz + kz; const Type dx = invDx * stencilG1; const Type dy = invDy * stencilG2; const Type dz = invDz * stencilG3; const float cosThetaCosPhi = cosTheta[k] * cosPhi[k]; const float cosThetaSinPhi = cosTheta[k] * sinPhi[k]; const float sinThetaCosPhi = sinTheta[k] * cosPhi[k]; outG1[k] = cosThetaCosPhi * dx + cosThetaSinPhi * dy - sinTheta[k] * dz; outG2[k] = - sinPhi[k] * dx + cosPhi[k] * dy; outG3[k] = sinThetaCosPhi * dx + sinTheta[k] * sinPhi[k] * dy + cosTheta[k] * dz; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative // X and Y derivatives are identically zero { const Type stencilG30 = c8_1 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 1]) + c8_2 * (+ inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 2]) + c8_3 * (+ inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 3]) + c8_4 * (+ inG3[kxnynz_kynz + 3] + inG3[kxnynz_kynz + 4]); const long k0 = kxnynz_kynz + 0; const Type dz0 = invDz * stencilG30; outG1[k0] = -sinTheta[k0] * dz0; outG2[k0] = 0; outG3[k0] = cosTheta[k0] * dz0; } // kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cells below for X/Y derivative 
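// NOTE (editorial): in stencilG31 below, the c8_3 and c8_4 terms add (rather than subtract) the
// in-grid samples at rows 1 and 2. Those taps would fall above the free surface, so this looks like
// an antisymmetric image condition, in[-d] = -in[d]: the mirrored sample's sign flip cancels the
// stencil's minus sign. stencilG30 above uses the same trick for the c8_2..c8_4 terms.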
{ const Type stencilG11 = c8_1 * (- inG1[(kx+0) * nynz + kynz + 1] + inG1[(kx+1) * nynz + kynz + 1]) + c8_2 * (- inG1[(kx-1) * nynz + kynz + 1] + inG1[(kx+2) * nynz + kynz + 1]) + c8_3 * (- inG1[(kx-2) * nynz + kynz + 1] + inG1[(kx+3) * nynz + kynz + 1]) + c8_4 * (- inG1[(kx-3) * nynz + kynz + 1] + inG1[(kx+4) * nynz + kynz + 1]); const Type stencilG21 = c8_1 * (- inG2[kxnynz + (ky+0) * nz + 1] + inG2[kxnynz + (ky+1) * nz + 1]) + c8_2 * (- inG2[kxnynz + (ky-1) * nz + 1] + inG2[kxnynz + (ky+2) * nz + 1]) + c8_3 * (- inG2[kxnynz + (ky-2) * nz + 1] + inG2[kxnynz + (ky+3) * nz + 1]) + c8_4 * (- inG2[kxnynz + (ky-3) * nz + 1] + inG2[kxnynz + (ky+4) * nz + 1]); const Type stencilG31 = c8_1 * (- inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 2]) + c8_2 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 3]) + c8_3 * (+ inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 4]) + c8_4 * (+ inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 5]); const long k1 = kxnynz_kynz + 1; const Type dx1 = invDx * stencilG11; const Type dy1 = invDy * stencilG21; const Type dz1 = invDz * stencilG31; outG1[k1] = cosTheta[k1] * cosPhi[k1] * dx1 + cosTheta[k1] * sinPhi[k1] * dy1 - sinTheta[k1] * dz1; outG2[k1] = - sinPhi[k1] * dx1 + cosPhi[k1] * dy1; outG3[k1] = sinTheta[k1] * cosPhi[k1] * dx1 + sinTheta[k1] * sinPhi[k1] * dy1 + cosTheta[k1] * dz1; } // kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative { const Type stencilG12 = c8_1 * (- inG1[(kx+0) * nynz + kynz + 2] + inG1[(kx+1) * nynz + kynz + 2]) + c8_2 * (- inG1[(kx-1) * nynz + kynz + 2] + inG1[(kx+2) * nynz + kynz + 2]) + c8_3 * (- inG1[(kx-2) * nynz + kynz + 2] + inG1[(kx+3) * nynz + kynz + 2]) + c8_4 * (- inG1[(kx-3) * nynz + kynz + 2] + inG1[(kx+4) * nynz + kynz + 2]); const Type stencilG22 = c8_1 * (- inG2[kxnynz + (ky+0) * nz + 2] + inG2[kxnynz + (ky+1) * nz + 2]) + c8_2 * (- inG2[kxnynz + (ky-1) * nz + 2] + inG2[kxnynz + (ky+2) * nz + 2]) + c8_3 * (- inG2[kxnynz + (ky-2) * nz + 2] + inG2[kxnynz + (ky+3) * nz + 2]) + c8_4 * (- inG2[kxnynz + (ky-3) * nz + 2] + inG2[kxnynz + (ky+4) * nz + 2]); const Type stencilG32 = c8_1 * (- inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 3]) + c8_2 * (- inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 4]) + c8_3 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 5]) + c8_4 * (+ inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 6]); const long k2 = kxnynz_kynz + 2; const Type dx2 = invDx * stencilG12; const Type dy2 = invDy * stencilG22; const Type dz2 = invDz * stencilG32; outG1[k2] = cosTheta[k2] * cosPhi[k2] * dx2 + cosTheta[k2] * sinPhi[k2] * dy2 - sinTheta[k2] * dz2; outG2[k2] = - sinPhi[k2] * dx2 + cosPhi[k2] * dy2; outG3[k2] = sinTheta[k2] * cosPhi[k2] * dx2 + sinTheta[k2] * sinPhi[k2] * dy2 + cosTheta[k2] * dz2; } // kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative { const Type stencilG13 = c8_1 * (- inG1[(kx+0) * nynz + kynz + 3] + inG1[(kx+1) * nynz + kynz + 3]) + c8_2 * (- inG1[(kx-1) * nynz + kynz + 3] + inG1[(kx+2) * nynz + kynz + 3]) + c8_3 * (- inG1[(kx-2) * nynz + kynz + 3] + inG1[(kx+3) * nynz + kynz + 3]) + c8_4 * (- inG1[(kx-3) * nynz + kynz + 3] + inG1[(kx+4) * nynz + kynz + 3]); const Type stencilG23 = c8_1 * (- inG2[kxnynz + (ky+0) * nz + 3] + inG2[kxnynz + (ky+1) * nz + 3]) + c8_2 * (- inG2[kxnynz + (ky-1) * nz + 3] + inG2[kxnynz + (ky+2) * nz + 3]) + c8_3 * (- inG2[kxnynz + (ky-2) * nz + 3] + inG2[kxnynz + (ky+3) * nz + 3]) + c8_4 * (- inG2[kxnynz + (ky-3) * nz + 3] + inG2[kxnynz + (ky+4) * nz + 3]); const Type stencilG33 = c8_1 * (- inG3[kxnynz_kynz + 3] + 
inG3[kxnynz_kynz + 4]) + c8_2 * (- inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 5]) + c8_3 * (- inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 6]) + c8_4 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 7]); const long k3 = kxnynz_kynz + 3; const Type dx3 = invDx * stencilG13; const Type dy3 = invDy * stencilG23; const Type dz3 = invDz * stencilG33; outG1[k3] = cosTheta[k3] * cosPhi[k3] * dx3 + cosTheta[k3] * sinPhi[k3] * dy3 - sinTheta[k3] * dz3; outG2[k3] = - sinPhi[k3] * dx3 + cosPhi[k3] * dy3; outG3[k3] = sinTheta[k3] * cosPhi[k3] * dx3 + sinTheta[k3] * sinPhi[k3] * dy3 + cosTheta[k3] * dz3; } } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline static void applyFirstDerivatives3D_TTI_MinusHalf( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, Type * __restrict__ inG1, Type * __restrict__ inG2, Type * __restrict__ inG3, float * __restrict__ sinTheta, float * __restrict__ cosTheta, float * __restrict__ sinPhi, float * __restrict__ cosPhi, Type * __restrict__ outG1, Type * __restrict__ outG2, Type * __restrict__ outG3, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { const long kindex1 = kx * ny * nz + ky * nz + k; const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); outG1[kindex1] = outG1[kindex2] = 0; outG2[kindex1] = outG2[kindex2] = 0; outG3[kindex1] = outG3[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = kx * ny * nz + k * nz + kz; const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; outG1[kindex1] = outG1[kindex2] = 0; outG2[kindex1] = outG2[kindex2] = 0; outG3[kindex1] = outG3[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { const long kindex1 = k * ny * nz + ky * nz + kz; const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; outG1[kindex1] = outG1[kindex2] = 0; outG2[kindex1] = outG2[kindex2] = 0; outG3[kindex1] = outG3[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; 
const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + (kz-4); const long kzm3 = kxnynz_kynz + (kz-3); const long kzm2 = kxnynz_kynz + (kz-2); const long kzm1 = kxnynz_kynz + (kz-1); const long kzp0 = kxnynz_kynz + (kz+0); const long kzp1 = kxnynz_kynz + (kz+1); const long kzp2 = kxnynz_kynz + (kz+2); const long kzp3 = kxnynz_kynz + (kz+3); // ........................ G1 ........................ const Type stencilG1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]); const Type stencilG1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]); const Type stencilG1C = c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]); // ........................ G2 ........................ const Type stencilG2A = c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]); const Type stencilG2B = c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) + c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) + c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) + c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]); // ........................ G3 ........................ 
const Type stencilG3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]); const Type stencilG3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]); const Type stencilG3C = c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]); const long k = kxnynz_kynz + kz; outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C; outG2[k] = - invDx * stencilG2A + invDy * stencilG2B; outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; { // kz = 0 -- at the free surface -- p = 0, dp = 0 const long k = kxnynz_kynz + 0; outG1[k] = 0; outG2[k] = 0; outG3[k] = 0; } // kz = 1 -- one cell below the free surface { const long kz = 1; const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + 2; const long kzm3 = kxnynz_kynz + 1; const long kzm2 = kxnynz_kynz + 0; const long kzm1 = kxnynz_kynz + 0; const long kzp0 = kxnynz_kynz + 1; const long kzp1 = kxnynz_kynz + 2; const long kzp2 = kxnynz_kynz + 3; const long kzp3 = kxnynz_kynz + 4; // ........................ G1 ........................ 
const Type stencilG1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]); const Type stencilG1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]); const Type stencilG1C = c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]); // ........................ G2 ........................ const Type stencilG2A = c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]); const Type stencilG2B = c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) + c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) + c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) + c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]); // ........................ G3 ........................ 
const Type stencilG3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]); const Type stencilG3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]); const Type stencilG3C = c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]); const long k = kxnynz_kynz + kz; outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C; outG2[k] = - invDx * stencilG2A + invDy * stencilG2B; outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C; } // kz = 2 -- two cells below the free surface { const long kz = 2; const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + 1; const long kzm3 = kxnynz_kynz + 0; const long kzm2 = kxnynz_kynz + 0; const long kzm1 = kxnynz_kynz + 1; const long kzp0 = kxnynz_kynz + 2; const long kzp1 = kxnynz_kynz + 3; const long kzp2 = kxnynz_kynz + 4; const long kzp3 = kxnynz_kynz + 5; // ........................ G1 ........................ 
const Type stencilG1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]); const Type stencilG1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]); const Type stencilG1C = c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]); // ........................ G2 ........................ const Type stencilG2A = c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]); const Type stencilG2B = c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) + c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) + c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) + c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]); // ........................ G3 ........................ 
const Type stencilG3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]); const Type stencilG3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]); const Type stencilG3C = c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]); const long k = kxnynz_kynz + kz; outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C; outG2[k] = - invDx * stencilG2A + invDy * stencilG2B; outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C; } // kz = 3 -- three cells below the free surface { const long kz = 3; const long kynz_kz = + kynz + kz; const long kxm4 = (kx-4) * nynz + kynz_kz; const long kxm3 = (kx-3) * nynz + kynz_kz; const long kxm2 = (kx-2) * nynz + kynz_kz; const long kxm1 = (kx-1) * nynz + kynz_kz; const long kxp0 = (kx+0) * nynz + kynz_kz; const long kxp1 = (kx+1) * nynz + kynz_kz; const long kxp2 = (kx+2) * nynz + kynz_kz; const long kxp3 = (kx+3) * nynz + kynz_kz; const long kym4 = kxnynz + (ky-4) * nz + kz; const long kym3 = kxnynz + (ky-3) * nz + kz; const long kym2 = kxnynz + (ky-2) * nz + kz; const long kym1 = kxnynz + (ky-1) * nz + kz; const long kyp0 = kxnynz + (ky+0) * nz + kz; const long kyp1 = kxnynz + (ky+1) * nz + kz; const long kyp2 = kxnynz + (ky+2) * nz + kz; const long kyp3 = kxnynz + (ky+3) * nz + kz; const long kzm4 = kxnynz_kynz + 0; const long kzm3 = kxnynz_kynz + 0; const long kzm2 = kxnynz_kynz + 1; const long kzm1 = kxnynz_kynz + 2; const long kzp0 = kxnynz_kynz + 3; const long kzp1 = kxnynz_kynz + 4; const long kzp2 = kxnynz_kynz + 5; const long kzp3 = kxnynz_kynz + 6; // ........................ G1 ........................ 
const Type stencilG1A = c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) + c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) + c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) + c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]); const Type stencilG1B = c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) + c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) + c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) + c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]); const Type stencilG1C = c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) + c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) + c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) + c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]); // ........................ G2 ........................ const Type stencilG2A = c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) + c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) + c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) + c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]); const Type stencilG2B = c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) + c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) + c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) + c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]); // ........................ G3 ........................ const Type stencilG3A = c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) + c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) + c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) + c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]); const Type stencilG3B = c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) + c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) + c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) + c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]); const Type stencilG3C = c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) + c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) + c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) + c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]); const long k = kxnynz_kynz + kz; outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C; outG2[k] = - invDx * stencilG2A + invDy * stencilG2B; outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C; } } } } } }; #endif
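/*
 * Editorial sketch (standalone, not part of the propagator above): the interior loops of both
 * PlusHalf and MinusHalf combine the Cartesian derivatives (dx, dy, dz) with a (theta, phi)
 * rotation. The rows used for outG1/outG2/outG3 form an orthonormal 3x3 rotation, so the length
 * of the gradient is preserved. The minimal program below applies that same rotation to a single
 * gradient vector; the angle and gradient values are made up for illustration.
 */
#include <math.h>
#include <stdio.h>

int main(void) {
    const double theta = 0.3, phi = 0.7;       /* tilt and azimuth in radians (illustrative) */
    const double dx = 1.0, dy = 2.0, dz = 3.0; /* Cartesian derivatives (illustrative) */
    const double st = sin(theta), ct = cos(theta);
    const double sp = sin(phi),   cp = cos(phi);

    /* same linear combinations as outG1/outG2/outG3 in the PlusHalf interior loop */
    const double g1 = ct * cp * dx + ct * sp * dy - st * dz;
    const double g2 = -sp * dx + cp * dy;
    const double g3 = st * cp * dx + st * sp * dy + ct * dz;

    /* orthonormality check: rotated gradient has the same Euclidean norm */
    printf("g = (%f, %f, %f)\n", g1, g2, g3);
    printf("|d| = %f, |g| = %f\n",
           sqrt(dx * dx + dy * dy + dz * dz),
           sqrt(g1 * g1 + g2 * g2 + g3 * g3));
    return 0;
}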
Simulation.c
#include "XSbench_header.h" //////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // implementation of the algorithm, with only minor CPU optimizations in place. // Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, XSBench will only run the baseline implementation. Optimized variants // are not yet implemented for this OpenMP targeting offload port. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype) { if( mype == 0) printf("Beginning event based simulation...\n"); //////////////////////////////////////////////////////////////////////////////// // SUMMARY: Simulation Data Structure Manifest for "SD" Object // Here we list all heap arrays (and lengths) in SD that would need to be // offloaded manually if using an accelerator with a seperate memory space //////////////////////////////////////////////////////////////////////////////// // int * num_nucs; // Length = length_num_nucs; // double * concs; // Length = length_concs // int * mats; // Length = length_mats // double * unionized_energy_array; // Length = length_unionized_energy_array // int * index_grid; // Length = length_index_grid // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid // // Note: "unionized_energy_array" and "index_grid" can be of zero length // depending on lookup method. // // Note: "Lengths" are given as the number of objects in the array, not the // number of bytes. 
//////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Begin Actual Simulation Loop //////////////////////////////////////////////////////////////////////////////// unsigned long long * verification = (unsigned long long *) malloc(in.lookups * sizeof(unsigned long long)); #pragma omp target teams distribute parallel for\ map(to: SD.max_num_nucs)\ map(to: SD.num_nucs[:SD.length_num_nucs])\ map(to: SD.concs[:SD.length_concs])\ map(to: SD.mats[:SD.length_mats])\ map(to: SD.unionized_energy_array[:SD.length_unionized_energy_array])\ map(to: SD.index_grid[:SD.length_index_grid])\ map(to: SD.nuclide_grid[:SD.length_nuclide_grid])\ map(from: verification[:in.lookups]) for( int i = 0; i < in.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // debugging //printf("E = %lf mat = %d\n", p_energy, mat); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on the verification value. // For accelerators, a different approach might be required // (e.g., atomics, reduction of thread-specific values in large // array via CUDA thrust, etc). 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification[i] = max_idx+1; } // Reduce validation hash on the host unsigned long long validation_hash = 0; for( int i = 0; i < in.lookups; i++ ) validation_hash += verification[i]; return validation_hash; } // Calculates the microscopic cross section for a given nuclide & energy void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, double * egrid, int * index_data, NuclideGridPoint * nuclide_grids, long idx, double * xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint * low, * high; // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + idx - 1]; else low = &nuclide_grids[nuc*n_gridpoints + idx]; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. { // pull ptr from energy grid and check to ensure that // we're not reading off the end of the nuclide's grid if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1]; else low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]]; } else // Hash grid { // load lower bounding index int u_low = index_data[idx * n_isotopes + nuc]; // Determine higher bounding index int u_high; if( idx == hash_bins - 1 ) u_high = n_gridpoints - 1; else u_high = index_data[(idx+1)*n_isotopes + nuc] + 1; // Check edge cases to make sure energy is actually between these // Then, if things look good, search for gridpoint in the nuclide grid // within the lower and higher limits we've calculated. 
double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy; double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy; int lower; if( p_energy <= e_low ) lower = 0; else if( p_energy >= e_high ) lower = n_gridpoints - 1; else lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high); if( lower == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + lower - 1]; else low = &nuclide_grids[nuc*n_gridpoints + lower]; } high = low + 1; // calculate the reusable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); // Total XS xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs); // Elastic XS xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs); // Absorption XS (struct fields keep the historical "absorbtion" spelling) xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs); // Fission XS xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs); // Nu Fission XS xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs); //test /* if( omp_get_thread_num() == 0 ) { printf("Lookup: Energy = %lf, nuc = %d\n", p_energy, nuc); printf("e_h = %lf e_l = %lf\n", high->energy , low->energy); printf("xs_h = %lf xs_l = %lf\n", high->elastic_xs, low->elastic_xs); printf("total_xs = %lf\n\n", xs_vector[1]); } */ } // Calculates macroscopic cross section based on a given material & energy void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, int * num_nucs, double * concs, double * egrid, int * index_data, NuclideGridPoint * nuclide_grids, int * mats, double * macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){ int p_nuc; // the nuclide we are looking up long idx = -1; double conc; // the concentration of the nuclide in the material // cleans out macro_xs_vector for( int k = 0; k < 5; k++ ) macro_xs_vector[k] = 0; // If we are using the unionized energy grid (UEG), we only // need to perform 1 binary search per macroscopic lookup. // If we are using the nuclide grid search, it will have to be // done inside of the "calculate_micro_xs" function for each different // nuclide in the material. if( grid_type == UNIONIZED ) idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid); else if( grid_type == HASH ) { double du = 1.0 / hash_bins; idx = p_energy / du; } // Once we find the pointer array on the UEG, we can pull the data // from the respective nuclide grids, as well as the nuclide // concentration data for the material // Each nuclide from the material needs to have its micro-XS array // looked up & interpolated (via calculate_micro_xs). Then, the // micro XS is multiplied by the concentration of that nuclide // in the material, and added to the total macro XS array.
// (Independent -- though if parallelizing, must use atomic operations // or otherwise control access to the xs_vector and macro_xs_vector to // avoid simultaneous writing to the same data structure) for( int j = 0; j < num_nucs[mat]; j++ ) { double xs_vector[5]; p_nuc = mats[mat*max_num_nucs + j]; conc = concs[mat*max_num_nucs + j]; calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins ); for( int k = 0; k < 5; k++ ) macro_xs_vector[k] += xs_vector[k] * conc; } //test /* for( int k = 0; k < 5; k++ ) printf("Energy: %lf, Material: %d, XSVector[%d]: %lf\n", p_energy, mat, k, macro_xs_vector[k]); */ } // binary search for energy on unionized energy grid // returns lower index long grid_search( long n, double quarry, double * A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // binary search for energy on nuclide energy grid long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high) { long lowerLimit = low; long upperLimit = high; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint].energy > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // picks a material based on a probabilistic distribution int pick_mat( uint64_t * seed ) { // I have a nice spreadsheet supporting these numbers. They are // the fractions (by volume) of material in the core. Not a // *perfect* approximation of where XS lookups are going to occur, // but this will do a good job of biasing the system nonetheless. // Also could be argued that doing fractions by weight would be // a better approximation, but volume does a good enough job for now. double dist[12]; dist[0] = 0.140; // fuel dist[1] = 0.052; // cladding dist[2] = 0.275; // cold, borated water dist[3] = 0.134; // hot, borated water dist[4] = 0.154; // RPV dist[5] = 0.064; // Lower, radial reflector dist[6] = 0.066; // Upper reflector / top plate dist[7] = 0.055; // bottom plate dist[8] = 0.008; // bottom nozzle dist[9] = 0.015; // top nozzle dist[10] = 0.025; // top of fuel assemblies dist[11] = 0.013; // bottom of fuel assemblies double roll = LCG_random_double(seed); // makes a pick based on the distro for( int i = 0; i < 12; i++ ) { double running = 0; for( int j = i; j > 0; j-- ) running += dist[j]; if( roll < running ) return i; } return 0; } double LCG_random_double(uint64_t * seed) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; //return ldexp(*seed, -63); } uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; }
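/*
 * Editorial sketch (standalone, not part of XSBench): fast_forward_LCG above skips the
 * generator n steps in O(log n) by binary decomposition of n. Because m = 2^63 divides
 * 2^64, letting the unsigned arithmetic wrap mod 2^64 and reducing mod m only at the end
 * yields the same value as reducing after every operation. The program below checks the
 * fast-forward against naive step-by-step iteration for a range of skip distances.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t lcg_step(uint64_t s) {
    const uint64_t m = 9223372036854775808ULL; /* 2^63, same parameters as above */
    const uint64_t a = 2806196910506780709ULL;
    const uint64_t c = 1ULL;
    return (a * s + c) % m;
}

static uint64_t lcg_skip(uint64_t seed, uint64_t n) {
    /* same algorithm as fast_forward_LCG above */
    const uint64_t m = 9223372036854775808ULL;
    uint64_t a = 2806196910506780709ULL, c = 1ULL;
    uint64_t a_new = 1, c_new = 0;
    while (n > 0) {
        if (n & 1) { a_new *= a; c_new = c_new * a + c; }
        c *= (a + 1); /* increment for the doubled step: c * (a + 1) */
        a *= a;       /* multiplier for the doubled step: a^2       */
        n >>= 1;
    }
    return (a_new * seed + c_new) % m;
}

int main(void) {
    for (uint64_t n = 0; n < 2000; ++n) {
        uint64_t s = 42;
        for (uint64_t i = 0; i < n; ++i) s = lcg_step(s);
        assert(lcg_skip(42, n) == s); /* closed form matches iteration */
    }
    puts("fast-forward matches naive iteration");
    return 0;
}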
residual_criteria.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_RESIDUAL_CRITERIA ) #define KRATOS_RESIDUAL_CRITERIA // System includes // External includes // Project includes #include "includes/model_part.h" #include "includes/define.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualCriteria * @ingroup KratosCore * @brief This is a convergence criterion that employs the residual as the criterion * @details The reactions from the RHS are not computed in the residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace > class ResidualCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( ResidualCriteria ); typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef std::size_t IndexType; typedef std::size_t SizeType; ///@} ///@name Life Cycle ///@{ //* Constructor. explicit ResidualCriteria(Kratos::Parameters Settings) : BaseType() { if (Settings.Has("residual_absolute_tolerance")) { mAlwaysConvergedNorm = Settings["residual_absolute_tolerance"].GetDouble(); } else if (Settings.Has("absolute_tolerance")) { mAlwaysConvergedNorm = Settings["absolute_tolerance"].GetDouble(); } else { KRATOS_WARNING("ResidualCriteria") << "Neither residual_absolute_tolerance nor absolute_tolerance is defined in the settings. Using default 1.0e-9" << std::endl; mAlwaysConvergedNorm = 1.0e-9; } if (Settings.Has("residual_relative_tolerance")) { mRatioTolerance = Settings["residual_relative_tolerance"].GetDouble(); } else if (Settings.Has("relative_tolerance")) { mRatioTolerance = Settings["relative_tolerance"].GetDouble(); } else { KRATOS_WARNING("ResidualCriteria") << "Neither residual_relative_tolerance nor relative_tolerance is defined in the settings. Using default 1.0e-4" << std::endl; mRatioTolerance = 1.0e-4; } this->mActualizeRHSIsNeeded = true; } //* Constructor. explicit ResidualCriteria( TDataType NewRatioTolerance, TDataType AlwaysConvergedNorm) : BaseType(), mRatioTolerance(NewRatioTolerance), mAlwaysConvergedNorm(AlwaysConvergedNorm) { this->mActualizeRHSIsNeeded = true; } //* Copy constructor. explicit ResidualCriteria( ResidualCriteria const& rOther ) :BaseType(rOther) ,mRatioTolerance(rOther.mRatioTolerance) ,mInitialResidualNorm(rOther.mInitialResidualNorm) ,mCurrentResidualNorm(rOther.mCurrentResidualNorm) ,mAlwaysConvergedNorm(rOther.mAlwaysConvergedNorm) ,mReferenceDispNorm(rOther.mReferenceDispNorm) { this->mActualizeRHSIsNeeded = true; } //* Destructor. ~ResidualCriteria() override {} ///@} ///@name Operators ///@{ /** * @brief Criteria that need to be called after getting the solution * @details Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param A System matrix (unused) * @param Dx Vector of results (variations on nodal variables) * @param b RHS vector (residual + reactions) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& A, const TSystemVectorType& Dx, const TSystemVectorType& b ) override { const SizeType size_b = TSparseSpace::Size(b); if (size_b != 0) { //if we are solving for something SizeType size_residual; CalculateResidualNorm(rModelPart, mCurrentResidualNorm, size_residual, rDofSet, b); TDataType ratio = 0.0; if(mInitialResidualNorm < std::numeric_limits<TDataType>::epsilon()) { ratio = 0.0; } else { ratio = mCurrentResidualNorm/mInitialResidualNorm; } const TDataType float_size_residual = static_cast<TDataType>(size_residual); const TDataType absolute_norm = (mCurrentResidualNorm/float_size_residual); KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Obtained ratio = " << ratio << "; Expected ratio = " << mRatioTolerance << "; Absolute norm = " << absolute_norm << "; Expected norm = " << mAlwaysConvergedNorm << "]" << std::endl; rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio; rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm; if (ratio <= mRatioTolerance || absolute_norm < mAlwaysConvergedNorm) { KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << "Convergence is achieved" << std::endl; return true; } else { return false; } } else { return true; } } /** * @brief This function initializes the convergence criteria * @param rModelPart Reference to the ModelPart containing the problem. (unused) */ void Initialize(ModelPart& rModelPart) override { BaseType::Initialize(rModelPart); } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { BaseType::InitializeSolutionStep(rModelPart, rDofSet, rA, rDx, rb); // Filling mActiveDofs when MPCs exist if (rModelPart.NumberOfMasterSlaveConstraints() > 0) { mActiveDofs.resize(rDofSet.size()); #pragma omp parallel for for(int i=0; i<static_cast<int>(mActiveDofs.size()); ++i) { mActiveDofs[i] = true; } #pragma omp parallel for for (int i=0; i<static_cast<int>(rDofSet.size()); ++i) { const auto it_dof = rDofSet.begin() + i; if (it_dof->IsFixed()) { mActiveDofs[it_dof->EquationId()] = false; } } for (const auto& r_mpc : rModelPart.MasterSlaveConstraints()) { for (const auto& r_dof : r_mpc.GetMasterDofsVector()) { mActiveDofs[r_dof->EquationId()] = false; } for (const auto& r_dof : r_mpc.GetSlaveDofsVector()) { mActiveDofs[r_dof->EquationId()] = false; } } } SizeType size_residual; CalculateResidualNorm(rModelPart, mInitialResidualNorm, size_residual, rDofSet, rb); } /** * @brief This function finalizes the solution step * @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param A System matrix (unused) * @param Dx Vector of results (variations on nodal variables) * @param b RHS vector (residual + reactions) */ void FinalizeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& A, const TSystemVectorType& Dx, const TSystemVectorType& b ) override { BaseType::FinalizeSolutionStep(rModelPart, rDofSet, A, Dx, b); } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualCriteria"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method computes the norm of the residual * @details It checks if the dof is fixed * @param rModelPart Reference to the ModelPart containing the problem. * @param rResidualSolutionNorm The norm of the residual * @param rDofNum The number of DoFs * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param b RHS vector (residual + reactions) */ virtual void CalculateResidualNorm( ModelPart& rModelPart, TDataType& rResidualSolutionNorm, SizeType& rDofNum, DofsArrayType& rDofSet, const TSystemVectorType& b ) { // Initialize TDataType residual_solution_norm = TDataType(); SizeType dof_num = 0; // Auxiliary values TDataType residual_dof_value = 0.0; const auto it_dof_begin = rDofSet.begin(); const int number_of_dof = static_cast<int>(rDofSet.size()); // Loop over Dofs if (rModelPart.NumberOfMasterSlaveConstraints() > 0) { #pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm, dof_num) for (int i = 0; i < number_of_dof; i++) { auto it_dof = it_dof_begin + i; const IndexType dof_id = it_dof->EquationId(); if (mActiveDofs[dof_id]) { residual_dof_value = TSparseSpace::GetValue(b,dof_id); residual_solution_norm += std::pow(residual_dof_value, 2); dof_num++; } } } else { #pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm, dof_num) for (int i = 0; i < number_of_dof; i++) { auto it_dof = it_dof_begin + i; if (!it_dof->IsFixed()) { const IndexType dof_id = it_dof->EquationId(); residual_dof_value = TSparseSpace::GetValue(b,dof_id); residual_solution_norm += std::pow(residual_dof_value, 2); dof_num++; } } } rDofNum = dof_num; rResidualSolutionNorm = std::sqrt(residual_solution_norm); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ TDataType mRatioTolerance; /// The ratio threshold for the norm of the residual TDataType mInitialResidualNorm; /// The reference norm of the residual TDataType mCurrentResidualNorm; /// The current norm of the residual TDataType mAlwaysConvergedNorm; /// The absolute value threshold for the norm of the residual TDataType mReferenceDispNorm; /// The norm at the beginning of the iterations std::vector<bool>
mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class ResidualCriteria ///@} ///@name Type Definitions ///@{ ///@} } // namespace Kratos. #endif // KRATOS_RESIDUAL_CRITERIA defined
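/*
 * Editorial sketch (standalone, not part of Kratos): PostCriteria above declares convergence
 * when EITHER the residual norm has dropped by the relative factor mRatioTolerance OR the
 * per-DoF residual is already below mAlwaysConvergedNorm. The helper below reproduces that
 * decision in plain C, including the guard that treats a (near-)zero initial residual as
 * already converged; all names here are ad hoc.
 */
#include <float.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool residual_converged(double initial_norm, double current_norm,
                               size_t n_dofs, double rel_tol, double abs_tol) {
    /* mirrors: ratio = 0 when the initial norm is below machine epsilon */
    const double ratio = (initial_norm < DBL_EPSILON)
                             ? 0.0
                             : current_norm / initial_norm;
    const double absolute_norm = current_norm / (double)n_dofs;
    return ratio <= rel_tol || absolute_norm < abs_tol;
}

int main(void) {
    /* residual dropped from 1.0 to 5e-5 over 1000 DoFs: the relative test passes */
    printf("%d\n", residual_converged(1.0, 5e-5, 1000, 1e-4, 1e-9)); /* prints 1 */
    /* residual only reached 5e-3: neither test passes */
    printf("%d\n", residual_converged(1.0, 5e-3, 1000, 1e-4, 1e-9)); /* prints 0 */
    return 0;
}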
18_omp_last_priv_nested.c
// clang-format off // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst // REQUIRES: openmp // clang-format on // NOTE: This test has limited applicability in this scenario: // lastprivate(x) copies address used in the MPI_send, and subsequently copies the result back to "x*". // The data flow tracker detects only the usage of the copy in the context of MPI (see "foo() and func(...)") // NOTE 2: with optimization the parameter "x" of MPI_Send mock call in the parallel loop gets "undef" #include "omp.h" extern void MPI_Send(void*, int); void func(int* x, int* e) { // lastprivate - addr(!) value of x is copied to "private_val" (which is tracked) in outlined region // , and "int x=1;" is thus not tracked. // check-inst: define {{.*}} @func // check-inst: define {{.*}} @.omp_outlined // check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1) #pragma omp parallel for lastprivate(x), shared(e) for (int i = 0; i < 10; ++i) { // Analysis should not filter x, but e... MPI_Send((void*)x, *e); } } void foo() { // check-inst: define {{.*}} @foo // check-inst-NOT: call void @__typeart_alloc_stack int x = 1; int y = 2; #pragma omp parallel { func(&x, &y); } } void func_other(int* x, int* e) { // lastprivate - addr(!) value of x is copied to "private_val" (which is tracked) in outlined region // check-inst: define {{.*}} @func_other // check-inst: define {{.*}} @.omp_outlined // check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1) #pragma omp parallel for lastprivate(x), shared(e) for (int i = 0; i < 10; ++i) { // Analysis should not filter x, but e... MPI_Send(x, *e); } MPI_Send(x, *e); } void bar(int x_other) { // check-inst: define {{.*}} @bar // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1) int x = x_other; int y = 2; #pragma omp parallel { func_other(&x, &y); } } // CHECK: TypeArtPass [Heap & Stack] // CHECK-NEXT: Malloc : 0 // CHECK-NEXT: Free : 0 // CHECK-NEXT: Alloca : 3 // CHECK-NEXT: Global : 0
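/*
 * Editorial sketch (standalone, not part of the TypeART test): what lastprivate(x) means for
 * the pointer in func() above -- each thread iterates on a private copy of x, and after the
 * loop the copy from the sequentially-last iteration is written back to the original
 * variable. Compile with e.g. `gcc -fopenmp`; without OpenMP the pragma is ignored and the
 * result is the same.
 */
#include <stdio.h>

int main(void) {
    int data[10];
    for (int i = 0; i < 10; ++i) data[i] = i * i;

    int *x = NULL;
#pragma omp parallel for lastprivate(x)
    for (int i = 0; i < 10; ++i) {
        x = &data[i]; /* every thread updates its own private copy of x */
    }
    /* x now holds the value from iteration i == 9, regardless of thread count */
    printf("*x = %d\n", *x); /* prints 81 */
    return 0;
}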