source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__isne_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_fc64) // A.*B function (eWiseMult): GB (_AemultB_01__isne_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__isne_fc64) // A.*B function (eWiseMult): GB (_AemultB_03__isne_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fc64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__isne_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__isne_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fc64) // C=scalar+B GB (_bind1st__isne_fc64) // C=scalar+B' GB (_bind1st_tran__isne_fc64) // C=A+scalar GB (_bind2nd__isne_fc64) // C=A'+scalar GB (_bind2nd_tran__isne_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_isne (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_isne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC64 || GxB_NO_ISNE_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isne_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isne_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_isne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_isne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_isne (x, aij) ; \ } GrB_Info GB (_bind1st_tran__isne_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_isne (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__isne_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sxc_fmt_plug.c
/* SXC cracker patch for JtR. Hacked together during Summer of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_sxc); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha.h" #include <openssl/blowfish.h> #include "pbkdf2_hmac_sha1.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "sxc" #define FORMAT_NAME "StarOffice .sxc" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " Blowfish" #else #define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests sxc_tests[] = { 
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7
a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e02
48fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; static struct custom_salt { int cipher_type; // FIXME: cipher_type seems to be ignored int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$*", 6)) return 0; /* handle 'chopped' .pot lines */ if (ldr_isa_pot_source(ciphertext)) return 1; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if 
((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (hexlenl(p) != BINARY_SIZE * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p) != res * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (hexlenl(p) != res * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* content */ goto err; if (hexlenl(p) != res * 2) goto err; if (strtokm(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtokm(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtokm(NULL, "*"); cs.checksum_type = atoi(p); p = strtokm(NULL, "*"); cs.iterations = atoi(p); p = strtokm(NULL, "*"); cs.key_size = atoi(p); strtokm(NULL, "*"); /* skip checksum field */ p = strtokm(NULL, "*"); cs.iv_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.salt_length = atoi(p); p = strtokm(NULL, 
"*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.original_length = atoi(p); p = strtokm(NULL, "*"); cs.length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtokm(ctcopy, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); p = strtokm(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char key[MAX_KEYS_PER_CRYPT][32]; unsigned char hash[MAX_KEYS_PER_CRYPT][32]; BF_KEY bf_key; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; int i; SHA_CTX ctx; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA1_Init(&ctx); 
SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i])); SHA1_Final((unsigned char *)hash[i], &ctx); } #ifdef SIMD_COEF_32 { int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 20; pin[i] = (unsigned char*)hash[i]; pout[i] = key[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, cur_salt->key_size, 0); } #else pbkdf2_sha1(hash[0], 20, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, key[0], cur_salt->key_size, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, key[i]); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char*)crypt_out[index+i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sxc_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | 
FMT_8_BIT | FMT_OMP, { "iteration count", }, sxc_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, sxc_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
iw_core.c
/*
// Copyright 2016-2018 Intel Corporation All Rights Reserved.
//
// The source code, information and material ("Material") contained herein is
// owned by Intel Corporation or its suppliers or licensors, and title
// to such Material remains with Intel Corporation or its suppliers or
// licensors. The Material contains proprietary information of Intel
// or its suppliers and licensors. The Material is protected by worldwide
// copyright laws and treaty provisions. No part of the Material may be used,
// copied, reproduced, modified, published, uploaded, posted, transmitted,
// distributed or disclosed in any way without Intel's prior express written
// permission. No license under any patent, copyright or other intellectual
// property rights in the Material is granted to or conferred upon you,
// either expressly, by implication, inducement, estoppel or otherwise.
// Any license under such intellectual property rights must be express and
// approved by Intel in writing.
//
// Unless otherwise agreed by Intel in writing,
// you may not remove or alter this notice or any other notice embedded in
// Materials by Intel or Intel's suppliers or licensors in any way.
//
*/

#include "iw_own.h"
#include "iw/iw_image.h"

/* Platform/compiler feature detection for the atomic-add fallback chain:
 * Windows intrinsics > OpenMP atomics (GCC >= 4.7 or any non-GCC OpenMP
 * compiler) > C11 <stdatomic.h> / GCC builtins > plain non-atomic add. */
#if defined _WIN32
#include <malloc.h>
#include <intrin.h>
#else
#ifdef _OPENMP
#if (defined __GNUC__) && !(defined __clang__)
#define GCC_VERSION (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
#if (GCC_VERSION >= 40700)
/* GCC gained "#pragma omp atomic capture" in 4.7. */
#define OWN_ALLOW_OMP_ATOMICS
#endif
#undef GCC_VERSION
#else
#define OWN_ALLOW_OMP_ATOMICS
#endif
#endif
#ifdef OWN_ALLOW_OMP_ATOMICS
#include <omp.h> // Use OMP atomics
#else
#if (defined __clang__ && defined __has_include)
#if !__has_include(<stdatomic.h>)
#ifndef __STDC_NO_ATOMICS__
#define __STDC_NO_ATOMICS__
#endif
#endif
#elif (defined __GNUC__)
#define GCC_VERSION (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
#if (GCC_VERSION < 40900)
/* <stdatomic.h> appeared in GCC 4.9; mark it unavailable before that. */
#ifndef __STDC_NO_ATOMICS__
#define __STDC_NO_ATOMICS__
#endif
#endif
#undef GCC_VERSION
#endif
#if !defined __STDC_NO_ATOMICS__
#include <stdatomic.h>
#ifndef __ATOMIC_ACQ_REL
/* Fallback definition matching the GCC builtin memory-order constant. */
#define __ATOMIC_ACQ_REL 4
#endif
#else
/* NOTE(review): "my not" is a typo for "may not" in this diagnostic text;
 * left byte-identical here since this edit only adds comments. */
#pragma message("Atomic operations are not supported by this compiler. Some features my not be thread-safe.")
#endif
#endif
#ifndef __APPLE__
#include <malloc.h>
#endif
#endif

/* /////////////////////////////////////////////////////////////////////////////
//                   IW DLL entry points
///////////////////////////////////////////////////////////////////////////// */
#ifdef IW_BUILD_DLL
#if defined _WIN32
#include <Windows.h>
/* Standard no-op DLL entry point; always reports success. */
int WINAPI DllMain( HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved )
{
    switch( fdwReason )
    {
    case DLL_PROCESS_ATTACH: break;
    case DLL_THREAD_ATTACH:  break;
    case DLL_THREAD_DETACH:  break;
    case DLL_PROCESS_DETACH: break;
    default: break;
    }
    return 1;
    /* Unreachable by design: placed after return only to silence
     * unused-parameter warnings. */
    UNREFERENCED_PARAMETER(hinstDLL);
    UNREFERENCED_PARAMETER(lpvReserved);
}
#elif defined __unix__
int _init(void)
{
    return 1;
}
void _fini(void)
{
}
#elif defined __APPLE__
/* One-shot constructor guard; currently performs no real work. */
__attribute__((constructor)) void initializer( void )
{
    static int initialized = 0;
    if(!initialized)
    {
        initialized = 1;
    }
    return;
}
__attribute__((destructor)) void destructor()
{
}
#endif
#endif

/* /////////////////////////////////////////////////////////////////////////////
//                   Base IW definitions
///////////////////////////////////////////////////////////////////////////// */

/* Size in bytes of one element of the given IPP data type; 0 if unknown. */
IW_DECL(int) iwTypeToSize(IppDataType dataType)
{
    switch(dataType)
    {
    case ipp8u: case ipp8s:
        return 1;
    case ipp8uc: case ipp8sc: case ipp16u: case ipp16s:
        return 2;
    case ipp16uc: case ipp16sc: case ipp32u: case ipp32s: case ipp32f:
        return 4;
    case ipp32uc: case ipp32sc: case ipp32fc: case ipp64u: case ipp64s: case ipp64f:
        return 8;
    case ipp64uc: case ipp64sc: case ipp64fc:
        return 16;
    default:
        return 0;
    }
}

/* Minimum representable value of the type (negative max-abs for floats);
 * 0 for unsupported types. */
IW_DECL(double) iwTypeGetMin(IppDataType type)
{
    switch(type)
    {
    case ipp8u:  return IPP_MIN_8U;
    case ipp8s:  return IPP_MIN_8S;
    case ipp16u: return IPP_MIN_16U;
    case ipp16s: return IPP_MIN_16S;
    case ipp32u: return IPP_MIN_32U;
    case ipp32s: return IPP_MIN_32S;
    case ipp32f: return -IPP_MAXABS_32F;
    case ipp64f: return -IPP_MAXABS_64F;
    default:     return 0;
    }
}

/* Maximum representable value of the type; 0 for unsupported types. */
IW_DECL(double) iwTypeGetMax(IppDataType type)
{
    switch(type)
    {
    case ipp8u:  return IPP_MAX_8U;
    case ipp8s:  return IPP_MAX_8S;
    case ipp16u: return IPP_MAX_16U;
    case ipp16s: return IPP_MAX_16S;
    case ipp32u: return IPP_MAX_32U;
    case ipp32s: return IPP_MAX_32S;
    case ipp32f: return IPP_MAXABS_32F;
    case ipp64f: return IPP_MAXABS_64F;
    default:     return 0;
    }
}

/* Full value range (max - min) for integer types; 0 for floats and
 * unsupported types. */
IW_DECL(double) iwTypeGetRange(IppDataType type)
{
    switch(type)
    {
    case ipp8u:  return ((double)IPP_MAX_8U - IPP_MIN_8U);
    case ipp8s:  return ((double)IPP_MAX_8S - IPP_MIN_8S);
    case ipp16u: return ((double)IPP_MAX_16U - IPP_MIN_16U);
    case ipp16s: return ((double)IPP_MAX_16S - IPP_MIN_16S);
    case ipp32u: return ((double)IPP_MAX_32U - IPP_MIN_32U);
    case ipp32s: return ((double)IPP_MAX_32S - IPP_MIN_32S);
    default:     return 0;
    }
}

/* 1 if the type is a floating-point (real or complex) type, else 0. */
IW_DECL(int) iwTypeIsFloat(IppDataType type)
{
    return (type == ipp64f || type == ipp64fc || type == ipp32f || type == ipp32fc)?1:0;
}

/* 1 if the type is signed (includes all float types), else 0. */
IW_DECL(int) iwTypeIsSigned(IppDataType type)
{
    return (type == ipp64f || type == ipp64fc || type == ipp64s || type == ipp64sc ||
        type == ipp32f || type == ipp32fc || type == ipp32s || type == ipp32sc ||
        type == ipp16s || type == ipp16sc || type == ipp8s || type == ipp8sc)?1:0;
}

/* Saturate (clamp and round) a double to the destination type's range.
 * Float destinations pass through unchanged. */
IW_DECL(double) iwValueSaturate(double val, IppDataType dstType)
{
    switch(dstType)
    {
    case ipp8u:  return (double)ownCast_64f8u(val);
    case ipp8s:  return (double)ownCast_64f8s(val);
    case ipp16u: return (double)ownCast_64f16u(val);
    case ipp16s: return (double)ownCast_64f16s(val);
    case ipp32u: return (double)ownCast_64f32u(val);
    case ipp32s: return (double)ownCast_64f32s(val);
    default:     return val;
    }
}

/* Map a relative value in [0,1] to the type's absolute [min,max] range.
 * Identity for float types. */
IW_DECL(double) iwValueRelToAbs(double val, IppDataType type)
{
    if(iwTypeIsFloat(type))
        return val;
    else
    {
        double min = iwTypeGetMin(type);
        double max = iwTypeGetMax(type);
        return (max - min)*val + min;
    }
}

/* Inverse of iwValueRelToAbs: absolute value -> relative [0,1]. */
IW_DECL(double) iwValueAbsToRel(double val, IppDataType type)
{
    if(iwTypeIsFloat(type))
        return val;
    else
    {
        double min = iwTypeGetMin(type);
        double max = iwTypeGetMax(type);
        return (val - min)/(max - min);
    }
}

/* Correction factor compensating the range asymmetry of signed integer
 * types (|min| = max+1); 0 for unsigned, float, or rangeless types. */
IW_DECL(double) iwRangeWeightCorrector(IppDataType type)
{
    if(iwTypeIsSigned(type) && !iwTypeIsFloat(type))
    {
        double min   = iwTypeGetMin(type);
        double max   = iwTypeGetMax(type);
        double range = iwTypeGetRange(type);
        if(range)
            return (-min-max)/range;
        else
            return 0;
    }
    return 0;
}

/* /////////////////////////////////////////////////////////////////////////////
//                   IwAtomic - Atomic operations layer
///////////////////////////////////////////////////////////////////////////// */

/* Atomically add delta to *pInt and return the PREVIOUS value
 * (fetch-and-add semantics on every supported path).
 * NOTE(review): the final fallback branch is NOT atomic — it only fires
 * when the build has neither Windows intrinsics, OpenMP atomics, nor
 * compiler atomics (see the #pragma message above). */
IW_DECL(int) iwAtomic_AddInt(int *pInt, int delta)
{
#if defined _WIN32
    return _InterlockedExchangeAdd((long volatile*)pInt, delta);
#else
#ifdef OWN_ALLOW_OMP_ATOMICS
    int ret;
#pragma omp atomic capture
    {
        ret = *pInt;
        *pInt += delta;
    }
    return ret;
#else
#if defined __APPLE__ && !defined __STDC_NO_ATOMICS__
    return __atomic_fetch_add(pInt, delta, __ATOMIC_ACQ_REL);
#elif defined __GNUC__ && !defined __STDC_NO_ATOMICS__
    /* NOTE(review): identical to the __APPLE__ branch above; the two
     * could be merged into one condition. */
    return __atomic_fetch_add(pInt, delta, __ATOMIC_ACQ_REL);
#else
    /* Non-atomic last resort. */
    int ret = *pInt;
    *pInt += delta;
    return ret;
#endif
#endif
#endif
}

/* /////////////////////////////////////////////////////////////////////////////
//                   IW version info
///////////////////////////////////////////////////////////////////////////// */

/* Fill *pVersion with the IW and underlying IPP version info.
 * Silently does nothing for a NULL pointer. */
IW_DECL(void) iwGetLibVersion(IwVersion *pVersion)
{
    if(!pVersion)
        return;

    pVersion->m_major       = IW_VERSION_MAJOR;
    pVersion->m_minor       = IW_VERSION_MINOR;
    pVersion->m_update      = IW_VERSION_UPDATE;
    pVersion->m_versionStr  = IW_VERSION_STR;
    pVersion->m_pIppVersion = ippiGetLibVersion();
#ifdef IW_PREBUILT
    pVersion->m_bUserBuild = 0;
#else
    pVersion->m_bUserBuild = 1;
#endif
}

/* /////////////////////////////////////////////////////////////////////////////
//                   IW status
///////////////////////////////////////////////////////////////////////////// */

/* Human-readable text for an IPP/IW status code.
 * NOTE(review): the three branches below are currently identical; the
 * if/else chain looks like a placeholder for IW-specific messages. */
IW_DECL(const char*) iwGetStatusString(IppStatus status)
{
#ifdef ICV_BASE
    (void)status;
    return "Status messages are not supported";
#else
    if(status <= iwStsErr)
        return ippGetStatusString(status);
    else if(status >= iwStsWrn)
        return ippGetStatusString(status);
    else
        return ippGetStatusString(status);
#endif
}
ssytrf.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zhetrf.c, normal z -> s, Fri Sep 28 17:38:07 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

#include <string.h>

/***************************************************************************//**
 *
 * @ingroup plasma_hetrf
 *
 * Factorize a symmetric matrix A using a 'communication avoiding' Aasen's
 * algorithm, followed by band LU factorization. The factorization has the form
 *
 *   \f[ A = P \times L \times T \times L^T \times P^T, \f]
 * or
 *   \f[ A = P \times U^T \times T \times U \times P^T, \f]
 *
 * where U is a unit-diagonal upper triangular matrix and L is a unit-diagonal
 * lower triangular matrix, T is a band matrix, and P is a permutation matrix.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *          Only PlasmaLower is supported for now.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in,out] pA
 *          On entry, the symmetric matrix A (only the uplo triangle is
 *          referenced).
 *          On exit, if return value = 0, the factor U or L from the Aasen's
 *          factorization A = (P*U^T)*T*(P*U^T)^T or A = (P*L)*T*(P*L)^T.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 * @param[out] ipiv
 *          The pivot indices used by Aasen's algorithm; for 1 <= i <= min(m,n),
 *          row and column i of the matrix was interchanged with row and
 *          column ipiv(i).
 *
 * @param[out] pT
 *          On exit, if return value = 0, the LU factors of the band matrix T.
 *
 * @param[in] ldt
 *          The leading dimension of the array T.
 *
 * @param[out] ipiv2
 *          The pivot indices used by the band LU; for 1 <= i <= min(m,n),
 *          row and column i of the matrix was interchanged with row and
 *          column ipiv2(i).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the leading minor of order i of A is not
 *         positive definite, so the factorization could not
 *         be completed, and the solution has not been computed.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_ssytrf
 * @sa plasma_chetrf
 * @sa plasma_dhetrf
 * @sa plasma_shetrf
 *
 ******************************************************************************/
int plasma_ssytrf(plasma_enum_t uplo,
                  int n,
                  float *pA, int lda, int *ipiv,
                  float *pT, int ldt, int *ipiv2)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    // Only the Lower variant is implemented; reject everything else.
    // (Replaces the old inline commented-out Upper condition.)
    if (uplo != PlasmaLower) {
        plasma_error("illegal value of uplo (Upper not supported, yet)");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_hetrf(plasma, PlasmaRealFloat, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Split the available panel threads between the band LU panel and the
    // Aasen panel. NOTE: this permanently updates plasma->max_panel_threads,
    // matching the original behavior.
    int max_panel_threads_gbtrf = 1;
    int max_panel_threads_hetrf = 1;
    if (plasma->max_panel_threads > 3) {
        max_panel_threads_gbtrf = 2;
    }
    max_panel_threads_hetrf =
        imax(1, plasma->max_panel_threads - max_panel_threads_gbtrf);
    plasma->max_panel_threads = max_panel_threads_hetrf;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t T;
    plasma_desc_t W;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaRealFloat, uplo, nb, nb,
                                           n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        // Fixed error message (the original reported the wrong creator).
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }
    // Band matrix (general band, to prepare for the band solve).
    retval = plasma_desc_general_band_create(PlasmaRealFloat, PlasmaGeneral,
                                             nb, nb, ldt, n, 0, 0, n, n,
                                             nb, nb, &T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        plasma_desc_destroy(&A);  // don't leak A on this error path
        return retval;
    }
    // Workspace sized as a block column.
    int tot = 3;
    int ldw = (1+(4+tot)*A.mt)*nb;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        ldw, nb, 0, 0, ldw, nb, &W);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&T);  // don't leak A and T on this error path
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence and request.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    plasma_request_t request;
    retval = plasma_request_init(&request);
    (void)retval;

    // Initialize data.
    memset(T.matrix, 0, ldt*n*sizeof(float));
    memset(W.matrix, 0, ldw*nb*sizeof(float));
    // Preset the first nb pivots to the identity permutation; the
    // factorization fills in the rest.
    for (int i = 0; i < nb; i++)
        ipiv[i] = 1+i;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_str2desc(pA, lda, A, &sequence, &request);
    }
    // implicit synchronization

    #pragma omp parallel
    #pragma omp master
    {
        // Compute the L*T*L^T factorization of A, where T is a band matrix.
        plasma_omp_ssytrf(uplo, A, ipiv, T, ipiv2, W, &sequence, &request);
    }
    // implicit synchronization

    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_sdesc2tr(A, pA, lda, &sequence, &request);
        plasma_omp_sdesc2pb(T, pT, ldt, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&T);
    plasma_desc_destroy(&W);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_hetrf
 *
 * Factorize a symmetric matrix.
 * Non-blocking tile version of plasma_ssytrf().
 * May return before the computation is finished.
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *          Only PlasmaLower is supported for now.
 *
 * @param[in] A
 *          On entry, the symmetric matrix A (only the uplo triangle is
 *          referenced).
 *          On exit, if return value = 0, the factor U or L from the
 *          factorization A = (P*U^T)*T*(P*U^T)^T or A = (P*L)*T*(P*L)^T.
 *
 * @param[out] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row and column i of the
 *          matrix was interchanged with row and column ipiv(i).
 *
 * @param[out] T
 *          On exit, if return value = 0, the band matrix T of the
 *          factorization A = (P*U^T)*T*(P*U^T)^T or A = (P*L)*T*(P*L)^T.
 *
 * @param[out] ipiv2
 *          The pivot indices of the band LU factorization of T.
 *
 * @param[out] W
 *          Workspace descriptor.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_ssytrf
 * @sa plasma_omp_chetrf
 * @sa plasma_omp_dhetrf
 * @sa plasma_omp_shetrf
 *
 ******************************************************************************/
void plasma_omp_ssytrf(plasma_enum_t uplo, plasma_desc_t A, int *ipiv,
                       plasma_desc_t T,    int *ipiv2,
                       plasma_desc_t W,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    // Check sequence and request first: all later error reporting goes
    // through them. (Fix: the original reported uplo/descriptor errors
    // through a possibly NULL sequence.)
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    // Only the Lower variant is implemented (see plasma_ssytrf()).
    if (uplo != PlasmaLower) {
        plasma_error("illegal value of uplo (Upper not supported, yet)");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0)
        return;

    // Aasen's factorization A = (P*L)*T*(P*L)^T, then band LU of T.
    plasma_pssytrf_aasen(uplo, A, ipiv, T, W, sequence, request);
    plasma_psgbtrf(T, ipiv2, sequence, request);
}
GB_binop__rdiv_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int32) // A*D function (colscale): GB (_AxD__rdiv_int32) // D*A function (rowscale): GB (_DxB__rdiv_int32) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int32) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int32) // C=scalar+B GB (_bind1st__rdiv_int32) // C=scalar+B' GB (_bind1st_tran__rdiv_int32) // C=A+scalar GB (_bind2nd__rdiv_int32) // C=A'+scalar GB (_bind2nd_tran__rdiv_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT32 || GxB_NO_RDIV_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int32) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 32) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 32) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
conv_2d.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_CONV_2D_H_ #define MACE_KERNELS_CONV_2D_H_ #if defined(MACE_ENABLE_NEON) && defined(__aarch64__) #include <arm_neon.h> #endif #include <algorithm> #include <functional> #include <limits> #include <memory> #include <tuple> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/kernels/activation.h" #include "mace/kernels/conv_pool_2d_util.h" #include "mace/kernels/arm/conv_2d_neon.h" #include "mace/kernels/arm/conv_winograd.h" #include "mace/kernels/gemmlowp_util.h" #include "mace/kernels/quantize.h" #include "mace/utils/utils.h" #ifdef MACE_ENABLE_OPENCL #include "mace/core/runtime/opencl/cl2_header.h" #endif // MACE_ENABLE_OPENCL namespace mace { namespace kernels { struct Conv2dFunctorBase { Conv2dFunctorBase(const int *strides, const Padding &padding_type, const std::vector<int> &paddings, const int *dilations, const ActivationType activation, const float relux_max_limit) : strides_(strides), padding_type_(padding_type), paddings_(paddings), dilations_(dilations), activation_(activation), relux_max_limit_(relux_max_limit) {} const int *strides_; // [stride_h, stride_w] const Padding padding_type_; std::vector<int> paddings_; const int *dilations_; // [dilation_h, dilation_w] const ActivationType activation_; const float relux_max_limit_; }; template<DeviceType D, typename T> struct Conv2dFunctor; template<> 
struct Conv2dFunctor<DeviceType::CPU, float> : Conv2dFunctorBase { Conv2dFunctor(const int *strides, const Padding &padding_type, const std::vector<int> &paddings, const int *dilations, const ActivationType activation, const float relux_max_limit, const bool is_filter_transformed, ScratchBuffer *scratch) : Conv2dFunctorBase(strides, padding_type, paddings, dilations, activation, relux_max_limit), is_filter_transformed_(is_filter_transformed), scratch_(scratch) {} void Conv2dGeneral(const float *input, const float *filter, const index_t *in_shape, const index_t *out_shape, const index_t *filter_shape, const int *stride_hw, const int *dilation_hw, float *output) { const index_t in_image_size = in_shape[2] * in_shape[3]; const index_t out_image_size = out_shape[2] * out_shape[3]; const index_t in_batch_size = filter_shape[1] * in_image_size; const index_t out_batch_size = filter_shape[0] * out_image_size; const index_t filter_size = filter_shape[2] * filter_shape[3]; #pragma omp parallel for collapse(2) for (index_t b = 0; b < in_shape[0]; b++) { for (index_t m = 0; m < filter_shape[0]; m += 4) { const index_t in_width = in_shape[3]; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; const index_t out_channels = filter_shape[0]; const index_t in_channels = filter_shape[1]; const int stride_h = stride_hw[0]; const int stride_w = stride_hw[1]; const int dilation_h = dilation_hw[0]; const int dilation_w = dilation_hw[1]; if (m + 3 < out_channels) { float *out_ptr0_base = output + b * out_batch_size + m * out_image_size; float *out_ptr1_base = out_ptr0_base + out_image_size; float *out_ptr2_base = out_ptr1_base + out_image_size; float *out_ptr3_base = out_ptr2_base + out_image_size; for (index_t c = 0; c < in_channels; ++c) { const float *in_ptr_base = input + b * in_batch_size + c * in_image_size; const float *filter_ptr0 = filter + m * in_channels * filter_size + c * filter_size; const float *filter_ptr1 = filter_ptr0 + in_channels * 
filter_size; const float *filter_ptr2 = filter_ptr1 + in_channels * filter_size; const float *filter_ptr3 = filter_ptr2 + in_channels * filter_size; for (index_t h = 0; h < out_height; ++h) { for (index_t w = 0; w + 3 < out_width; w += 4) { // input offset index_t ih = h * stride_h; index_t iw = w * stride_w; index_t in_offset = ih * in_width + iw; // output (4 outch x 1 height x 4 width): vo_outch_height float vo0[4], vo1[4], vo2[4], vo3[4]; // load output index_t out_offset = h * out_width + w; for (index_t ow = 0; ow < 4; ++ow) { vo0[ow] = out_ptr0_base[out_offset + ow]; vo1[ow] = out_ptr1_base[out_offset + ow]; vo2[ow] = out_ptr2_base[out_offset + ow]; vo3[ow] = out_ptr3_base[out_offset + ow]; } // calc by row for (index_t kh = 0; kh < filter_shape[2]; ++kh) { for (index_t kw = 0; kw < filter_shape[3]; ++kw) { // outch 0 vo0[0] += in_ptr_base[in_offset + kw * dilation_w] * filter_ptr0[kw]; vo0[1] += in_ptr_base[in_offset + stride_w + kw * dilation_w] * filter_ptr0[kw]; vo0[2] += in_ptr_base[in_offset + 2 * stride_w + kw * dilation_w] * filter_ptr0[kw]; vo0[3] += in_ptr_base[in_offset + 3 * stride_w + kw * dilation_w] * filter_ptr0[kw]; // outch 1 vo1[0] += in_ptr_base[in_offset + kw * dilation_w] * filter_ptr1[kw]; vo1[1] += in_ptr_base[in_offset + stride_w + kw * dilation_w] * filter_ptr1[kw]; vo1[2] += in_ptr_base[in_offset + 2 * stride_w + kw * dilation_w] * filter_ptr1[kw]; vo1[3] += in_ptr_base[in_offset + 3 * stride_w + kw * dilation_w] * filter_ptr1[kw]; // outch 2 vo2[0] += in_ptr_base[in_offset + kw * dilation_w] * filter_ptr2[kw]; vo2[1] += in_ptr_base[in_offset + stride_w + kw * dilation_w] * filter_ptr2[kw]; vo2[2] += in_ptr_base[in_offset + 2 * stride_w + kw * dilation_w] * filter_ptr2[kw]; vo2[3] += in_ptr_base[in_offset + 3 * stride_w + kw * dilation_w] * filter_ptr2[kw]; // outch 3 vo3[0] += in_ptr_base[in_offset + kw * dilation_w] * filter_ptr3[kw]; vo3[1] += in_ptr_base[in_offset + stride_w + kw * dilation_w] * filter_ptr3[kw]; vo3[2] += 
in_ptr_base[in_offset + 2 * stride_w + kw * dilation_w] * filter_ptr3[kw]; vo3[3] += in_ptr_base[in_offset + 3 * stride_w + kw * dilation_w] * filter_ptr3[kw]; } // kw in_offset += dilation_h * in_width; filter_ptr0 += filter_shape[3]; filter_ptr1 += filter_shape[3]; filter_ptr2 += filter_shape[3]; filter_ptr3 += filter_shape[3]; } // kh for (index_t ow = 0; ow < 4; ++ow) { out_ptr0_base[out_offset + ow] = vo0[ow]; out_ptr1_base[out_offset + ow] = vo1[ow]; out_ptr2_base[out_offset + ow] = vo2[ow]; out_ptr3_base[out_offset + ow] = vo3[ow]; } filter_ptr0 -= filter_size; filter_ptr1 -= filter_size; filter_ptr2 -= filter_size; filter_ptr3 -= filter_size; } // w } // h } // c } else { for (index_t mm = m; mm < out_channels; ++mm) { float *out_ptr0_base = output + b * out_batch_size + mm * out_image_size; for (index_t c = 0; c < in_channels; ++c) { const float *in_ptr_base = input + b * in_batch_size + c * in_image_size; const float *filter_ptr0 = filter + mm * in_channels * filter_size + c * filter_size; for (index_t h = 0; h < out_height; ++h) { for (index_t w = 0; w + 3 < out_width; w += 4) { // input offset index_t ih = h * stride_h; index_t iw = w * stride_w; index_t in_offset = ih * in_width + iw; // output (1 outch x 1 height x 4 width): vo_outch_height float vo0[4]; // load output index_t out_offset = h * out_width + w; for (index_t ow = 0; ow < 4; ++ow) { vo0[ow] = out_ptr0_base[out_offset + ow]; } // calc by row for (index_t kh = 0; kh < filter_shape[2]; ++kh) { for (index_t kw = 0; kw < filter_shape[3]; ++kw) { // outch 0 vo0[0] += in_ptr_base[in_offset + kw * dilation_w] * filter_ptr0[kw]; vo0[1] += in_ptr_base[in_offset + stride_w + kw * dilation_w] * filter_ptr0[kw]; vo0[2] += in_ptr_base[in_offset + 2 * stride_w + kw * dilation_w] * filter_ptr0[kw]; vo0[3] += in_ptr_base[in_offset + 3 * stride_w + kw * dilation_w] * filter_ptr0[kw]; } // kw in_offset += dilation_h * in_width; filter_ptr0 += filter_shape[3]; } // kh for (index_t ow = 0; ow < 4; ++ow) { 
out_ptr0_base[out_offset + ow] = vo0[ow]; } filter_ptr0 -= filter_size; } // w } // h } // c } // mm } // if } // m } // b } MaceStatus operator()(const Tensor *input, const Tensor *filter, const Tensor *bias, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); MACE_CHECK_NOTNULL(input); MACE_CHECK_NOTNULL(filter); MACE_CHECK_NOTNULL(output); std::vector<index_t> filter_shape(4); if (is_filter_transformed_) { // TOC -> OIHW filter_shape[0] = filter->dim(1); filter_shape[1] = filter->dim(2); filter_shape[2] = filter_shape[3] = 3; } else { filter_shape = filter->shape(); } std::vector<index_t> output_shape(4); std::vector<int> paddings(2); if (paddings_.empty()) { CalcNCHWPaddingAndOutputSize(input->shape().data(), filter_shape.data(), dilations_, strides_, padding_type_, output_shape.data(), paddings.data()); } else { paddings = paddings_; CalcNCHWOutputSize(input->shape().data(), filter_shape.data(), paddings_.data(), dilations_, strides_, RoundType::FLOOR, output_shape.data()); } MACE_RETURN_IF_ERROR(output->Resize(output_shape)); index_t batch = output->dim(0); index_t channels = output->dim(1); index_t height = output->dim(2); index_t width = output->dim(3); index_t input_batch = input->dim(0); index_t input_channels = input->dim(1); index_t input_height = input->dim(2); index_t input_width = input->dim(3); index_t filter_h = filter_shape[2]; index_t filter_w = filter_shape[3]; MACE_CHECK(filter_shape[0] == channels, filter_shape[0], " != ", channels); MACE_CHECK(filter_shape[1] == input_channels, filter_shape[1], " != ", input_channels); index_t stride_h = strides_[0]; index_t stride_w = strides_[1]; index_t dilation_h = dilations_[0]; index_t dilation_w = dilations_[1]; MACE_CHECK(batch == input_batch, "Input/Output batch size mismatch"); index_t padded_input_height = input_height + paddings[0]; index_t padded_input_width = input_width + paddings[1]; index_t extra_input_height = padded_input_height; index_t extra_input_width = padded_input_width; 
index_t extra_output_height = height; index_t extra_output_width = width; int pad_top = paddings[0] >> 1; int pad_bottom = paddings[0] - pad_top; int pad_left = paddings[1] >> 1; int pad_right = paddings[1] - pad_left; Tensor::MappingGuard input_guard(input); Tensor::MappingGuard filter_guard(filter); Tensor::MappingGuard bias_guard(bias); Tensor::MappingGuard output_guard(output); auto filter_data = filter->data<float>(); auto bias_data = bias == nullptr ? nullptr : bias->data<float>(); auto output_data = output->mutable_data<float>(); std::function<void(const float *input, float *output)> conv_func; bool use_winograd = is_filter_transformed_ || (filter_h == 3 && filter_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1 && input_channels >= 8 && channels >= 8); bool use_neon_3x3_s1 = filter_h == 3 && filter_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_3x3_s2 = filter_h == 3 && filter_w == 3 && stride_h == 2 && stride_w == 2 && dilation_h == 1 && dilation_w == 1; bool use_neon_1x1_s1 = filter_h == 1 && filter_w == 1 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_5x5_s1 = filter_h == 5 && filter_w == 5 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_1x7_s1 = filter_h == 1 && filter_w == 7 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_7x1_s1 = filter_h == 7 && filter_w == 1 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_7x7_s1 = filter_h == 7 && filter_w == 7 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_7x7_s2 = filter_h == 7 && filter_w == 7 && stride_h == 2 && stride_w == 2 && dilation_h == 1 && dilation_w == 1; bool use_neon_7x7_s3 = filter_h == 7 && filter_w == 7 && stride_h == 3 && stride_w == 3 && dilation_h == 1 && dilation_w == 1; bool use_neon_1x15_s1 = filter_h == 1 && filter_w == 15 
&& stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; bool use_neon_15x1_s1 = filter_h == 15 && filter_w == 1 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1; std::vector<index_t> transformed_input_shape; std::vector<index_t> transformed_output_shape; std::vector<index_t> transformed_filter_shape; // When size of input feature map is bigger than 16x16, // set winograd out tile size to 6 to get higher performance. index_t winograd_out_tile_size = 2; if (input_height > 16 && input_width > 16) { winograd_out_tile_size = 6; } if (use_winograd) { extra_output_height = RoundUp<index_t>(height, winograd_out_tile_size); extra_input_height = std::max(padded_input_height, extra_output_height + 2); extra_output_width = RoundUp<index_t>(width, winograd_out_tile_size); extra_input_width = std::max(padded_input_width, extra_output_width + 2); if (extra_input_height != padded_input_height) { pad_bottom += (extra_input_height - padded_input_height); } if (extra_input_width != padded_input_width) { pad_right += (extra_input_width - padded_input_width); } index_t tile_height_count = extra_output_height / winograd_out_tile_size; index_t tile_width_count = extra_output_width / winograd_out_tile_size; index_t tile_count = tile_height_count * tile_width_count; index_t in_tile_area = (winograd_out_tile_size + 2) * (winograd_out_tile_size + 2); transformed_input_shape.insert(transformed_input_shape.end(), {in_tile_area, batch, input_channels, tile_count}); transformed_output_shape.insert(transformed_output_shape.end(), {in_tile_area, batch, channels, tile_count}); transformed_filter_shape.insert(transformed_filter_shape.end(), {in_tile_area, channels, input_channels}); } else { index_t tile_h, tile_w; if (use_neon_1x1_s1) { tile_h = 1; tile_w = 1; } else if (use_neon_3x3_s1) { tile_h = 2; tile_w = 4; } else if (use_neon_7x1_s1 || use_neon_15x1_s1) { tile_h = 4; tile_w = 1; } else { tile_h = 1; tile_w = 4; } extra_output_height = 
RoundUp<index_t>(height, tile_h); extra_input_height = std::max(padded_input_height, (extra_output_height - 1) * stride_h + (filter_h - 1) * dilation_h + 1); extra_output_width = RoundUp<index_t>(width, tile_w); extra_input_width = std::max(padded_input_width, (extra_output_width - 1) * stride_w + (filter_w - 1) * dilation_w + 1); if (extra_input_height != padded_input_height) { pad_bottom += (extra_input_height - padded_input_height); } if (extra_input_width != padded_input_width) { pad_right += (extra_input_width - padded_input_width); } } // decide scratch size before allocate it index_t total_scratch_size = 0; index_t transformed_input_size = 0; index_t transformed_output_size = 0; index_t padded_input_size = 0; index_t padded_output_size = 0; if (use_winograd) { transformed_input_size = std::accumulate(transformed_input_shape.begin(), transformed_input_shape.end(), 1, std::multiplies<index_t>()) * sizeof(float); transformed_output_size = std::accumulate(transformed_output_shape.begin(), transformed_output_shape.end(), 1, std::multiplies<index_t>()) * sizeof(float); total_scratch_size += transformed_input_size + transformed_output_size; } if (extra_input_height != input_height || extra_input_width != input_width) { padded_input_size = batch * input_channels * (input_height + pad_top + pad_bottom) * (input_width + pad_left + pad_right) * sizeof(float) + MACE_EXTRA_BUFFER_PAD_SIZE; total_scratch_size += padded_input_size; } if (extra_output_height != height || extra_output_width != width) { padded_output_size = batch * channels * extra_output_height * extra_output_width * sizeof(float); total_scratch_size += padded_output_size; } // Init scratch buffer scratch_->Rewind(); scratch_->GrowSize(total_scratch_size); Tensor transformed_input(scratch_->Scratch(transformed_input_size), DT_FLOAT); Tensor transformed_output(scratch_->Scratch(transformed_output_size), DT_FLOAT); Tensor padded_input(scratch_->Scratch(padded_input_size), DT_FLOAT); Tensor 
padded_output(scratch_->Scratch(padded_output_size), DT_FLOAT); const index_t extra_input_shape[4] = {batch, input_channels, extra_input_height, extra_input_width}; const index_t extra_output_shape[4] = {batch, channels, extra_output_height, extra_output_width}; // make host compiler happy MACE_UNUSED(extra_input_shape); MACE_UNUSED(extra_output_shape); // decide which convolution function to call if (use_winograd) { transformed_input.Reshape(transformed_input_shape); transformed_output.Reshape(transformed_output_shape); const float *transformed_filter_ptr; if (transformed_filter_.dim_size() == 0) { if (is_filter_transformed_) { transformed_filter_ptr = filter_data; } else { MACE_RETURN_IF_ERROR(transformed_filter_.Resize( transformed_filter_shape)); switch (winograd_out_tile_size) { case 2: TransformFilter4x4(filter_data, filter_shape[1], filter_shape[0], transformed_filter_.mutable_data<float>()); break; case 6: TransformFilter8x8(filter_data, filter_shape[1], filter_shape[0], transformed_filter_.mutable_data<float>()); break; default:MACE_NOT_IMPLEMENTED; } transformed_filter_ptr = transformed_filter_.data<float>(); } } else { transformed_filter_ptr = transformed_filter_.data<float>(); } float *transformed_input_data = transformed_input.mutable_data<float>(); float *transformed_output_data = transformed_output.mutable_data<float>(); conv_func = [=](const float *pad_input, float *pad_output) { WinoGradConv3x3s1(pad_input, transformed_filter_ptr, batch, extra_input_height, extra_input_width, input_channels, channels, winograd_out_tile_size, transformed_input_data, transformed_output_data, pad_output); }; } else if (use_neon_3x3_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK3x3S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_3x3_s2) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK3x3S2(pad_input, filter_data, extra_input_shape, extra_output_shape, 
pad_output); }; } else if (use_neon_1x1_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK1x1S1(pad_input, filter_data, batch, extra_input_height, extra_input_width, input_channels, channels, pad_output); }; } else if (use_neon_5x5_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK5x5S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_1x7_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK1x7S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_7x1_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK7x1S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_7x7_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK7x7S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_7x7_s2) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK7x7S2(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_7x7_s3) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK7x7S3(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_1x15_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK1x15S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else if (use_neon_15x1_s1) { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dNeonK15x1S1(pad_input, filter_data, extra_input_shape, extra_output_shape, pad_output); }; } else { conv_func = [=](const float *pad_input, float *pad_output) { Conv2dGeneral(pad_input, filter_data, extra_input_shape, extra_output_shape, filter_shape.data(), strides_, dilations_, pad_output); }; } // pad input and output const Tensor *pad_input_ptr = 
input; if (extra_input_height != input_height || extra_input_width != input_width) { MACE_RETURN_IF_ERROR(ConstructNCHWInputWithSpecificPadding(input, pad_top, pad_bottom, pad_left, pad_right, &padded_input)); pad_input_ptr = &padded_input; } // TODO(libin): don't need clear after bias is integrated in each conv Tensor *pad_output_ptr = output; if (extra_output_height != height || extra_output_width != width) { padded_output.Reshape({batch, channels, extra_output_height, extra_output_width}); padded_output.Clear(); pad_output_ptr = &padded_output; } else if (!use_neon_1x1_s1) { output->Clear(); } const float *pad_input_data = pad_input_ptr->data<float>(); float *pad_output_data = pad_output_ptr->mutable_data<float>(); conv_func(pad_input_data, pad_output_data); // unpack output if (extra_output_height != height || extra_output_width != width) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch; ++b) { for (index_t c = 0; c < channels; ++c) { for (index_t h = 0; h < height; ++h) { memcpy( output_data + b * channels * height * width + c * height * width + h * width, pad_output_data + b * channels * extra_output_height * extra_output_width + c * extra_output_height * extra_output_width + h * extra_output_width, sizeof(float) * width); } } } } if (bias_data != nullptr) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch; ++b) { for (index_t c = 0; c < channels; ++c) { for (index_t i = 0; i < height * width; ++i) { output_data[(b * channels + c) * height * width + i] += bias_data[c]; } } } } DoActivation(output_data, output_data, output->size(), activation_, relux_max_limit_); return MACE_SUCCESS; } Tensor transformed_filter_; bool is_filter_transformed_; ScratchBuffer *scratch_; }; template<> struct Conv2dFunctor<DeviceType::CPU, uint8_t> : Conv2dFunctorBase { Conv2dFunctor(const int *strides, const Padding &padding_type, const std::vector<int> &paddings, const int *dilations, const ActivationType activation, const float 
relux_max_limit, const bool is_filter_transformed, ScratchBuffer *scratch) : Conv2dFunctorBase(strides, padding_type, paddings, dilations, activation, relux_max_limit), scratch_(scratch) { MACE_UNUSED(is_filter_transformed); } template <typename T> inline void Im2col( const T *in_data, const std::vector<index_t> &in_shape, const index_t filter_h, const index_t filter_w, const index_t stride_h, const index_t stride_w, const T zero_point, const int pad_height, const int pad_width, const std::vector<index_t> &out_shape, const index_t depth, T* im2col_data) { const index_t input_row_size = in_shape[2] * in_shape[3]; const index_t patch_row_size = filter_w * in_shape[3]; #pragma omp parallel for collapse(3) for (index_t b = 0; b < out_shape[0]; ++b) { for (index_t h = 0; h < out_shape[1]; ++h) { for (index_t w = 0; w < out_shape[2]; ++w) { // Reshape a patch of input to column, which is corresponding to // a column of output(:, column). const index_t ih_begin = h * stride_h - (pad_height >> 1); const index_t ih_end = ih_begin + filter_h; const index_t iw_begin = w * stride_w - (pad_width >> 1); const index_t iw_end = iw_begin + filter_w; // gate height and width to separate padding const index_t ih_begin_gated = std::max<index_t>(0, ih_begin); const index_t ih_end_gated = std::min<index_t>(ih_end, in_shape[1]); const index_t iw_begin_gated = std::max<index_t>(0, iw_begin); const index_t iw_end_gated = std::min<index_t>(iw_end, in_shape[2]); const index_t pad_top = std::max<index_t>(0, -ih_begin); const index_t pad_bottom = ih_end - ih_end_gated; const index_t pad_left = std::max<index_t>(0, -iw_begin); const index_t pad_right = iw_end - iw_end_gated; index_t im2col_column_offset = ((b * out_shape[1] + h) * out_shape[2] + w) * depth; // fill in padding top if (pad_top > 0) { std::fill_n(im2col_data + im2col_column_offset, pad_top * patch_row_size, zero_point); } const index_t patch_row_size_gated = std::min(filter_w - pad_left, in_shape[2] - iw_begin_gated) * 
in_shape[3]; MACE_CHECK(patch_row_size_gated == ((filter_w - (pad_left + pad_right)) * in_shape[3])); const index_t pad_left_size = pad_left * in_shape[3]; const index_t pad_right_size = pad_right * in_shape[3]; index_t im2col_offset = im2col_column_offset + (pad_top * filter_w + pad_left) * in_shape[3]; index_t in_offset = ((b * in_shape[1] + ih_begin_gated) * in_shape[2] + iw_begin_gated) * in_shape[3]; // fill in effective rows for (index_t ih = ih_begin_gated; ih < ih_end_gated; ++ih) { // fill in padding left if (pad_left > 0) { const index_t left_offset = im2col_offset - pad_left_size; std::fill_n(im2col_data + left_offset, pad_left_size, zero_point); } // copy effective data std::copy_n(in_data + in_offset, patch_row_size_gated, im2col_data + im2col_offset); // fill in padding right if (pad_right > 0) { const index_t right_offset = im2col_offset + patch_row_size_gated; std::fill_n(im2col_data + right_offset, pad_right_size, zero_point); } in_offset += input_row_size; im2col_offset += patch_row_size; } // fill in padding bottom if (pad_bottom > 0) { const index_t pad_bottom_size = pad_bottom * patch_row_size; const index_t bottom_offset = im2col_column_offset + depth - pad_bottom_size; std::fill_n(im2col_data + bottom_offset, pad_bottom_size, zero_point); } } } } } inline void GetOutputMultiplierAndShift( const float lhs_scale, const float rhs_scale, const float output_scale, int32_t *quantized_multiplier, int *right_shift) { float real_multiplier = lhs_scale * rhs_scale / output_scale; MACE_CHECK(real_multiplier > 0.f && real_multiplier < 1.f, real_multiplier); int exponent; QuantizeMultiplier(real_multiplier, quantized_multiplier, &exponent); *right_shift = -exponent; MACE_CHECK(*right_shift >= 0); } typedef gemmlowp::VectorMap<const int32_t, gemmlowp::VectorShape::Col> ColVectorMap; typedef std::tuple< gemmlowp::OutputStageBiasAddition<ColVectorMap>, gemmlowp::OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint, gemmlowp::OutputStageSaturatingCastToUint8> 
Pipeline; inline Pipeline MakeOutputPipeline( const int32_t* bias_data, const index_t channels, const float lhs_scale, const float rhs_scale, const float output_scale, const int32_t output_zero_point) { ColVectorMap bias_vector(bias_data, channels); gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage; bias_addition_stage.bias_vector = bias_vector; int32_t quantized_multiplier; int32_t right_shift; GetOutputMultiplierAndShift(lhs_scale, rhs_scale, output_scale, &quantized_multiplier, &right_shift); gemmlowp::OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint quantize_down_stage; quantize_down_stage.result_offset_after_shift = output_zero_point; quantize_down_stage.result_fixedpoint_multiplier = quantized_multiplier; quantize_down_stage.result_shift = right_shift; gemmlowp::OutputStageSaturatingCastToUint8 saturating_cast_stage; return std::make_tuple(bias_addition_stage, quantize_down_stage, saturating_cast_stage); } MaceStatus operator()(const Tensor *input, // NHWC const Tensor *filter, // OHWI const Tensor *bias, Tensor *output, // NHWC StatsFuture *future) { MACE_UNUSED(future); MACE_CHECK(dilations_[0] == 1 && dilations_[1] == 1, "Quantization convolution does not support dilation > 1 yet."); gemmlowp::GemmContext& gemm_context = GetGemmlowpContext(); std::vector<index_t> output_shape(4); std::vector<int> paddings(2); if (paddings_.empty()) { CalcPaddingAndOutputSize(input->shape().data(), NHWC, filter->shape().data(), OHWI, dilations_, strides_, padding_type_, output_shape.data(), paddings.data()); } else { paddings = paddings_; CalcOutputSize(input->shape().data(), NHWC, filter->shape().data(), OHWI, paddings_.data(), dilations_, strides_, RoundType::FLOOR, output_shape.data()); } MACE_RETURN_IF_ERROR(output->Resize(output_shape)); index_t batch = output->dim(0); index_t height = output->dim(1); index_t width = output->dim(2); index_t channels = output->dim(3); index_t input_batch = input->dim(0); index_t input_channels = input->dim(3); 
index_t filter_h = filter->dim(1); index_t filter_w = filter->dim(2); index_t stride_h = strides_[0]; index_t stride_w = strides_[1]; const index_t depth = input_channels * filter_h * filter_w; const index_t columns = batch * height * width; MACE_CHECK(filter->dim(0) == channels, filter->dim(0), " != ", channels); MACE_CHECK(filter->dim(3) == input_channels, filter->dim(3), " != ", input_channels); MACE_CHECK(batch == input_batch, "Input/Output batch size mismatch"); Tensor::MappingGuard input_guard(input); Tensor::MappingGuard filter_guard(filter); Tensor::MappingGuard output_guard(output); auto input_data = input->data<uint8_t>(); auto filter_data = filter->data<uint8_t>(); auto output_data = output->mutable_data<uint8_t>(); index_t total_scratch_size = 0; index_t zero_bias_size = channels * sizeof(int32_t); total_scratch_size += (bias == nullptr ? zero_bias_size : 0); index_t im2col_size = depth * columns * sizeof(uint8_t); bool im2col_required = filter_h != 1 || filter_w != 1 || stride_h != 1 || stride_w != 1; total_scratch_size += (im2col_required ? 
im2col_size : 0); scratch_->Rewind(); scratch_->GrowSize(total_scratch_size); std::unique_ptr<Tensor> zero_bias; const int32_t *bias_data = nullptr; if (bias == nullptr) { zero_bias.reset(new Tensor(scratch_->Scratch(zero_bias_size), DT_INT32)); zero_bias->Reshape({channels}); zero_bias->Clear(); bias_data = zero_bias->data<int32_t>(); } else { bias_data = bias->data<int32_t>(); } std::unique_ptr<Tensor> im2col; auto gemm_input_data = input_data; if (im2col_required) { // prepare im2col im2col.reset(new Tensor(scratch_->Scratch(im2col_size), DT_UINT8)); uint8_t *im2col_data = im2col->mutable_data<uint8_t>(); Im2col(input_data, input->shape(), filter_h, filter_w, stride_h, stride_w, static_cast<uint8_t>(input->zero_point()), paddings[0], paddings[1], output->shape(), depth, im2col_data); gemm_input_data = im2col_data; } const int gemm_filter_rows = static_cast<int>(channels); const int gemm_filter_cols = static_cast<int>(depth); const int gemm_input_rows = static_cast<int>(depth); const int gemm_input_cols = static_cast<int>(columns); const int gemm_output_rows = static_cast<int>(channels); const int gemm_output_cols = static_cast<int>(columns); gemmlowp::MatrixMap<const uint8_t, gemmlowp::MapOrder::RowMajor> filter_matrix(filter_data, gemm_filter_rows, gemm_filter_cols); gemmlowp::MatrixMap<const uint8_t, gemmlowp::MapOrder::ColMajor> input_matrix(gemm_input_data, gemm_input_rows, gemm_input_cols); gemmlowp::MatrixMap<uint8_t, gemmlowp::MapOrder::ColMajor> output_matrix(output_data, gemm_output_rows, gemm_output_cols); const auto &output_pipeline = MakeOutputPipeline( bias_data, channels, filter->scale(), input->scale(), output->scale(), output->zero_point()); using BitDepthParams = gemmlowp::L8R8WithLhsNonzeroBitDepthParams; gemmlowp::GemmWithOutputPipeline<uint8_t, uint8_t, BitDepthParams>( &gemm_context, filter_matrix, input_matrix, &output_matrix, -filter->zero_point(), -input->zero_point(), output_pipeline); return MACE_SUCCESS; } ScratchBuffer *scratch_; }; 
#ifdef MACE_ENABLE_OPENCL
// GPU (OpenCL) specialization. Only the declaration lives here; the
// operator() body is defined in a separate OpenCL implementation file.
template<typename T>
struct Conv2dFunctor<DeviceType::GPU, T> : Conv2dFunctorBase {
  // The filter-transform flag and scratch buffer are CPU-path concepts and
  // are deliberately ignored on the GPU path.
  Conv2dFunctor(const int *strides,
                const Padding &padding_type,
                const std::vector<int> &paddings,
                const int *dilations,
                const ActivationType activation,
                const float relux_max_limit,
                const bool is_filter_transformed,
                ScratchBuffer *scratch)
      : Conv2dFunctorBase(strides,
                          padding_type,
                          paddings,
                          dilations,
                          activation,
                          relux_max_limit) {
    MACE_UNUSED(is_filter_transformed);
    MACE_UNUSED(scratch);
  }

  // Runs the convolution on the GPU; defined out-of-line.
  MaceStatus operator()(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        Tensor *output,
                        StatsFuture *future);

  cl::Kernel kernel_;                         // cached compiled kernel
  uint32_t kwg_size_;                         // kernel workgroup size
  std::unique_ptr<BufferBase> kernel_error_;  // device-side error reporting
  // Last input shape seen; presumably used to detect when the kernel must
  // be re-tuned — confirm against the OpenCL implementation.
  std::vector<index_t> input_shape_;
};
#endif  // MACE_ENABLE_OPENCL

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_CONV_2D_H_
omp_threadprivate.c
<ompts:test>
<ompts:testdescription>Test which checks the omp threadprivate directive by filling an array with random numbers in an parallelised region. Each thread generates one number of the array and saves this in a temporary threadprivate variable. In a second parallelised region the test controls, that the temporary variable contains still the former value by comparing it with the one in the array.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp threadprivate</ompts:directive>
<ompts:dependences>omp critical,omp_set_dynamic,omp_get_num_threads</ompts:dependences>
<ompts:testcode>
/*
 * Threadprivate is tested in 2 ways:
 * 1. The global variable declared as threadprivate should have
 *    local copy for each thread. Otherwise race condition and
 *    wrong result.
 * 2. If the value of local copy is retained for the two adjacent
 *    parallel regions
 */
#include "omp_testsuite.h"
#include <stdlib.h>
#include <stdio.h>

/* File-scope variables made threadprivate below: each thread gets its own
 * persistent copy.  LOOPCOUNT is expected to come from omp_testsuite.h. */
static int sum0=0;
static int myvalue = 0;

<ompts:check>#pragma omp threadprivate(sum0)</ompts:check>
<ompts:check>#pragma omp threadprivate(myvalue)</ompts:check>

/* Returns nonzero on success: both the per-thread partial-sum check and the
 * value-retention check across adjacent parallel regions must pass. */
int <ompts:testcode:functionname>omp_threadprivate</ompts:testcode:functionname>(FILE * logFile)
{
	int sum = 0;
	int known_sum;      /* analytically computed expected sum */
	int i;
	int iter;
	int *data;          /* one slot per thread, written by rank */
	int size;           /* number of threads in the team */
	int failed = 0;
	int my_random;
	/* Disable dynamic adjustment so the team size stays constant. */
	omp_set_dynamic(0);

	/* Check 1: each thread accumulates into its own threadprivate sum0;
	 * if sum0 were shared this would race and the total would be wrong. */
#pragma omp parallel private(i)
	{
		sum0 = 0;
#pragma omp for
		for (i = 1; i <= LOOPCOUNT; i++)
		{
			sum0 = sum0 + i;
		} /*end of for*/
#pragma omp critical
		{
			sum = sum + sum0;
		} /*end of critical */
	} /* end of parallel */
	known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
	if (known_sum != sum ) {
		fprintf (logFile, " known_sum = %d, sum = %d\n", known_sum, sum);
	}

	/* the next parallel region is just used to get the number of threads*/
	omp_set_dynamic(0);
#pragma omp parallel
	{
#pragma omp master
		{
			size=omp_get_num_threads();
			/* NOTE(review): malloc result is not checked before use. */
			data=(int*) malloc(size*sizeof(int));
			/* The implicit barrier at the end of the parallel region makes
			 * size/data visible to all threads afterwards. */
		}
	}/* end parallel*/

	srand(45);
	for (iter = 0; iter < 100; iter++){
		my_random = rand();	/* random number generator is called inside serial region*/

		/* the first parallel region is used to initialize myvalue and the array with my_random+rank*/
#pragma omp parallel
		{
			int rank;
			rank = omp_get_thread_num ();
			myvalue = data[rank] = my_random + rank;
		}

		/* the second parallel region verifies that the value of "myvalue" is retained */
#pragma omp parallel reduction(+:failed)
		{
			int rank;
			rank = omp_get_thread_num ();
			/* threadprivate guarantees each thread sees the value it wrote
			 * in the previous region (team size is fixed via omp_set_dynamic(0)). */
			failed = failed + (myvalue != data[rank]);
			if(myvalue != data[rank]){
				fprintf (logFile, " myvalue = %d, data[rank]= %d\n", myvalue, data[rank]);
			}
		}
	}
	free (data);
	return (known_sum == sum) && !failed;
} /* end of check_threadprivate*/
</ompts:testcode>
</ompts:test>
test.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" const int bs = 1024; const int nb = 512; const int X_VAL = 99; const int Y_VAL = 11; int main() { check_offloading(); long cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } // Checking team-level swap doesn't currently work on host. if (!cpuExec) { // Initialise. int *x = (int*)malloc(sizeof(int)*nb); int *y = (int*)malloc(sizeof(int)*nb); for(int ii = 0; ii < nb; ++ii) { x[ii] = X_VAL; y[ii] = Y_VAL; } /// Test team-level dependencies #pragma omp target map(tofrom: x[:nb], y[:nb]) #pragma omp teams num_teams(nb) thread_limit(bs) #pragma omp distribute parallel for for(int ii = 0; ii < nb*bs; ++ii) { #pragma omp critical { // Guaranteed to return additive identity, avoiding optimisation. const int identity = !x[omp_get_team_num()]; // Perform swap. const int temp = y[omp_get_team_num()]; y[omp_get_team_num()] = x[omp_get_team_num()] + identity; x[omp_get_team_num()] = temp; } } // Validate. int failures = 0; for(int ii = 0; ii < nb; ++ii) failures += (x[ii] != X_VAL || y[ii] != Y_VAL); if(failures) printf("failed %d times\n", failures); else printf("Succeeded\n"); /// Test team-level dependencies with increment #pragma omp target map(tofrom: x[:nb], y[:nb]) #pragma omp teams num_teams(nb) thread_limit(bs) #pragma omp distribute parallel for for(int ii = 0; ii < nb*bs; ++ii) { #pragma omp critical { // Perform swap. const int temp = y[omp_get_team_num()]; y[omp_get_team_num()] = x[omp_get_team_num()] + 1; x[omp_get_team_num()] = temp; } } // Validate. failures = 0; const int xcheck = X_VAL + (bs/2); const int ycheck = Y_VAL + (bs/2); for(int ii = 0; ii < nb; ++ii) failures += (x[ii] != xcheck || y[ii] != ycheck); if(failures) printf("failed %d times\n", failures); else printf("Succeeded\n"); } else {// if !cpuExec DUMP_SUCCESS(2); } }
nvptx_device_math_sin.c
// REQUIRES: nvptx-registered-target // RUN: %clang_cc1 -x c -internal-isystem %S/Inputs/include -fopenmp -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -x c -include __clang_openmp_device_functions.h -internal-isystem %S/../../lib/Headers/openmp_wrappers -internal-isystem %S/Inputs/include -fopenmp -triple nvptx64-nvidia-cuda -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=SLOW // RUN: %clang_cc1 -x c -internal-isystem %S/Inputs/include -fopenmp -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc -ffast-math -ffp-contract=fast // RUN: %clang_cc1 -x c -include __clang_openmp_device_functions.h -internal-isystem %S/../../lib/Headers/openmp_wrappers -internal-isystem %S/Inputs/include -fopenmp -triple nvptx64-nvidia-cuda -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -ffast-math -ffp-contract=fast | FileCheck %s --check-prefix=FAST // expected-no-diagnostics #include <math.h> double math(float f, double d, long double ld) { double r = 0; // SLOW: call float @__nv_sinf(float // FAST: call fast float @__nv_fast_sinf(float r += sinf(f); // SLOW: call double @__nv_sin(double // FAST: call fast double @__nv_sin(double r += sin(d); return r; } long double foo(float f, double d, long double ld) { double r = ld; r += math(f, d, ld); #pragma omp target map(r) { r += math(f, d, ld); } return r; }
Example_nestable_lock.1.c
/*
 * @@name: nestable_lock.1c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 */
#include <omp.h>

typedef struct {
    int a, b;
    omp_nest_lock_t lck;
} pair;

int work1();
int work2();
int work3();

/* Increments a. Only ever reached from incr_pair, which already holds
 * the lock, so no locking is needed here. */
void incr_a(pair *p, int a)
{
    p->a = p->a + a;
}

/* Increments b. Reachable both from incr_pair (lock held) and directly
 * (lock not held); the lock must therefore be nestable. */
void incr_b(pair *p, int b)
{
    omp_nest_lock_t *lock = &p->lck;

    omp_set_nest_lock(lock);
    p->b = p->b + b;
    omp_unset_nest_lock(lock);
}

/* Atomically increments both fields; incr_b re-acquires the same
 * nestable lock one level deeper. */
void incr_pair(pair *p, int a, int b)
{
    omp_nest_lock_t *lock = &p->lck;

    omp_set_nest_lock(lock);
    incr_a(p, a);
    incr_b(p, b);
    omp_unset_nest_lock(lock);
}

/* Runs the two increment paths concurrently in separate sections. */
void nestlock(pair *p)
{
#pragma omp parallel sections
    {
#pragma omp section
        incr_pair(p, work1(), work2());
#pragma omp section
        incr_b(p, work3());
    }
}
functions.c
#include<stdio.h>
#include<omp.h>

/*
 * Demonstrates the basic OpenMP runtime query functions: processor and
 * thread counts, thread ids, and omp_in_parallel() both outside and
 * inside a parallel region. Always returns 0.
 */
int main(){
    // disable dynamic adjustment of number of threads so that
    // omp_set_num_threads(2) below is honoured exactly
    omp_set_dynamic(0);

    int procs = omp_get_num_procs();
    printf("Procs: %d\n", procs);
    printf("Max threads: %d\n", omp_get_max_threads());

    omp_set_num_threads(2);
    // Outside any parallel region this prints 0.
    printf("In parallel: %d\n", omp_in_parallel());

    #pragma omp parallel
    {
        int threads = omp_get_num_threads();
        printf("Threads: %d\n", threads);
        int id = omp_get_thread_num();
        printf("ID: %d\n", id);
        // Inside the region this prints 1.
        // Fixed typo in output: was "In paralllel", now consistent with
        // the message printed before the region.
        printf("In parallel: %d\n", omp_in_parallel());
    }
    return 0;
}
residual_based_bdf_displacement_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME ) #define KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME /* System includes */ /* External includes */ /* Project includes */ #include "solving_strategies/schemes/residual_based_bdf_scheme.h" #include "includes/variables.h" #include "includes/checks.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedBDFDisplacementScheme * @ingroup KratosCore * @brief BDF integration scheme (displacement based) * @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is a two step \f$ n \f$ order accurate method. * Look at the base class for more details * @see ResidualBasedBDFScheme * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace> class ResidualBasedBDFDisplacementScheme : public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ /// Pointer definition of ResidualBasedBDFDisplacementScheme KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFDisplacementScheme ); /// Base class definition typedef Scheme<TSparseSpace,TDenseSpace> BaseType; typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType; typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType; /// Data type definition typedef typename BDFBaseType::TDataType TDataType; /// Matrix type definition typedef typename BDFBaseType::TSystemMatrixType TSystemMatrixType; /// Vector type definition typedef typename BDFBaseType::TSystemVectorType TSystemVectorType; /// Local system matrix type definition typedef typename BDFBaseType::LocalSystemVectorType 
LocalSystemVectorType; /// Local system vector type definition typedef typename BDFBaseType::LocalSystemMatrixType LocalSystemMatrixType; /// DoF array type definition typedef typename BDFBaseType::DofsArrayType DofsArrayType; /// DoF vector type definition typedef typename Element::DofsVectorType DofsVectorType; /// Nodes containers definition typedef ModelPart::NodesContainerType NodesArrayType; /// Elements containers definition typedef ModelPart::ElementsContainerType ElementsArrayType; /// Conditions containers definition typedef ModelPart::ConditionsContainerType ConditionsArrayType; ///@} ///@name Life Cycle ///@{ /** * @brief Constructor. The BDF method (parameters) * @param ThisParameters Parameters with the integration order */ explicit ResidualBasedBDFDisplacementScheme(Parameters ThisParameters) : ResidualBasedBDFDisplacementScheme(ThisParameters.Has("integration_order") ? static_cast<std::size_t>(ThisParameters["integration_order"].GetInt()) : 2) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "integration_order" : 2 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); } /** * @brief Constructor. The BDF method * @param Order The integration order * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives */ explicit ResidualBasedBDFDisplacementScheme(const std::size_t Order = 2) :BDFBaseType(Order) { } /** Copy Constructor. */ explicit ResidualBasedBDFDisplacementScheme(ResidualBasedBDFDisplacementScheme& rOther) :BDFBaseType(rOther) { } /** * Clone */ typename BaseType::Pointer Clone() override { return Kratos::make_shared<ResidualBasedBDFDisplacementScheme>(*this); } /** Destructor. 
*/ ~ResidualBasedBDFDisplacementScheme () override {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Performing the prediction of the solution * @details It predicts the solution for the current step x = xold + vold * Dt * @param rModelPart The model of the problem to solve * @param rDofSet Set of all primary variables * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ void Predict( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) override { KRATOS_TRY; ProcessInfo& current_process_info = rModelPart.GetProcessInfo(); const double delta_time = current_process_info[DELTA_TIME]; // Updating time derivatives (nodally for efficiency) const int num_nodes = static_cast<int>( rModelPart.Nodes().size() ); #pragma omp parallel for for(int i = 0; i< num_nodes; ++i) { auto it_node = rModelPart.Nodes().begin() + i; //ATTENTION::: the prediction is performed only on free nodes const array_1d<double, 3>& dot2un1 = it_node->FastGetSolutionStepValue(ACCELERATION, 1); const array_1d<double, 3>& dotun1 = it_node->FastGetSolutionStepValue(VELOCITY, 1); const array_1d<double, 3>& un1 = it_node->FastGetSolutionStepValue(DISPLACEMENT, 1); const array_1d<double, 3>& dot2un0 = it_node->FastGetSolutionStepValue(ACCELERATION); array_1d<double, 3>& dotun0 = it_node->FastGetSolutionStepValue(VELOCITY); array_1d<double, 3>& un0 = it_node->FastGetSolutionStepValue(DISPLACEMENT); if (it_node->HasDofFor(ACCELERATION_X)) { if (it_node -> IsFixed(ACCELERATION_X)) { dotun0[0] = (dot2un0[0] - BDFBaseType::mBDF[1] * dotun1[0])/BDFBaseType::mBDF[0]; un0[0] = (dotun0[0] - BDFBaseType::mBDF[1] * un1[0])/BDFBaseType::mBDF[0]; } } else if (it_node->HasDofFor(VELOCITY_X)) { if (it_node -> IsFixed(VELOCITY_X)) { un0[0] = (dotun1[0] - BDFBaseType::mBDF[1] * un1[0])/BDFBaseType::mBDF[0]; } } else if (it_node -> IsFixed(DISPLACEMENT_X) == false) { un0[0] = un1[0] + 
delta_time * dotun1[0] + 0.5 * std::pow(delta_time, 2) * dot2un1[0]; } if (it_node->HasDofFor(ACCELERATION_Y)) { if (it_node -> IsFixed(ACCELERATION_Y)) { dotun0[1] = (dot2un0[1] - BDFBaseType::mBDF[1] * dotun1[1])/BDFBaseType::mBDF[0]; un0[1] = (dotun0[1] - BDFBaseType::mBDF[1] * un1[1])/BDFBaseType::mBDF[0]; } } else if (it_node->HasDofFor(VELOCITY_Y)) { if (it_node -> IsFixed(VELOCITY_Y)) { un0[1] = (dotun1[1] - BDFBaseType::mBDF[1] * un1[1])/BDFBaseType::mBDF[0]; } } else if (it_node -> IsFixed(DISPLACEMENT_Y) == false) { un0[1] = un1[1] + delta_time * dotun1[1] + 0.5 * std::pow(delta_time, 2) * dot2un1[1]; } // For 3D cases if (it_node -> HasDofFor(DISPLACEMENT_Z)) { if (it_node->HasDofFor(ACCELERATION_Z)) { if (it_node -> IsFixed(ACCELERATION_Z)) { dotun0[2] = (dot2un0[2] - BDFBaseType::mBDF[1] * dotun1[2])/BDFBaseType::mBDF[0]; un0[2] = (dotun0[2] - BDFBaseType::mBDF[1] * un1[2])/BDFBaseType::mBDF[0]; } } else if (it_node->HasDofFor(VELOCITY_Y)) { if (it_node -> IsFixed(VELOCITY_Y)) { un0[2] = (dotun1[2] - BDFBaseType::mBDF[1] * un1[2])/BDFBaseType::mBDF[0]; } } else if (it_node -> IsFixed(DISPLACEMENT_Z) == false) { un0[2] = un1[2] + delta_time * dotun1[2] + 0.5 * std::pow(delta_time, 2) * dot2un1[2]; } } for (std::size_t i_order = 2; i_order < BDFBaseType::mOrder + 1; ++i_order) { const array_1d<double, 3>& dotun = it_node->FastGetSolutionStepValue(VELOCITY, i_order); const array_1d<double, 3>& un = it_node->FastGetSolutionStepValue(DISPLACEMENT, i_order); if (it_node->HasDofFor(ACCELERATION_X)) { if (it_node -> IsFixed(ACCELERATION_X)) { dotun0[0] -= (BDFBaseType::mBDF[i_order] * dotun[0])/BDFBaseType::mBDF[0]; un0[0] -= (BDFBaseType::mBDF[i_order] * un[0])/BDFBaseType::mBDF[0]; } } else if (it_node->HasDofFor(VELOCITY_X)) { if (it_node -> IsFixed(VELOCITY_X)) { un0[0] -= (BDFBaseType::mBDF[i_order] * un[0])/BDFBaseType::mBDF[0]; } } if (it_node->HasDofFor(ACCELERATION_Y)) { if (it_node -> IsFixed(ACCELERATION_Y)) { dotun0[1] -= 
(BDFBaseType::mBDF[i_order] * dotun[1])/BDFBaseType::mBDF[0]; un0[1] -= (BDFBaseType::mBDF[i_order] * un[1])/BDFBaseType::mBDF[0]; } } else if (it_node->HasDofFor(VELOCITY_Y)) { if (it_node -> IsFixed(VELOCITY_X)) { un0[1] -= (BDFBaseType::mBDF[i_order] * un[1])/BDFBaseType::mBDF[0]; } } // For 3D cases if (it_node -> HasDofFor(DISPLACEMENT_Z)) { if (it_node->HasDofFor(ACCELERATION_Z)) { if (it_node -> IsFixed(ACCELERATION_Z)) { dotun0[1] -= (BDFBaseType::mBDF[i_order] * dotun[2])/BDFBaseType::mBDF[0]; un0[1] -= (BDFBaseType::mBDF[i_order] * un[2])/BDFBaseType::mBDF[0]; } } else if (it_node->HasDofFor(VELOCITY_Y)) { if (it_node -> IsFixed(VELOCITY_X)) { un0[1] -= (BDFBaseType::mBDF[i_order] * un[2])/BDFBaseType::mBDF[0]; } } } } // Updating time derivatives UpdateFirstDerivative(it_node); UpdateSecondDerivative(it_node); } KRATOS_CATCH( "" ); } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. * @details Checks can be "expensive" as the function is designed * to catch user's errors. 
* @param rModelPart The model of the problem to solve * @return Zero means all ok */ int Check(ModelPart& rModelPart) override { KRATOS_TRY; const int err = BDFBaseType::Check(rModelPart); if(err!=0) return err; // Check for variables keys // Verify that the variables are correctly initialized KRATOS_CHECK_VARIABLE_KEY(DISPLACEMENT) KRATOS_CHECK_VARIABLE_KEY(VELOCITY) KRATOS_CHECK_VARIABLE_KEY(ACCELERATION) // Check that variables are correctly allocated for(auto& rnode : rModelPart.Nodes()) { KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DISPLACEMENT,rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY,rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(ACCELERATION,rnode) KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_X, rnode) KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_Y, rnode) KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_Z, rnode) } KRATOS_CATCH( "" ); return 0; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedBDFDisplacementScheme"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief Updating first time derivative (velocity) * @param itNode the node interator */ inline void UpdateFirstDerivative(NodesArrayType::iterator itNode) override { array_1d<double, 3>& dotun0 = itNode->FastGetSolutionStepValue(VELOCITY); noalias(dotun0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(DISPLACEMENT); for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) noalias(dotun0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(DISPLACEMENT, i_order); } /** * @brief Updating second time derivative (acceleration) * @param itNode the node interator */ inline void UpdateSecondDerivative(NodesArrayType::iterator itNode) override { array_1d<double, 3>& dot2un0 = itNode->FastGetSolutionStepValue(ACCELERATION); noalias(dot2un0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(VELOCITY); for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) noalias(dot2un0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(VELOCITY, i_order); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedBDFDisplacementScheme */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME 
defined */
convolution_7x7_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = 49; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * 2 - outw * 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < 7; u++) { for (int v = 0; v < 7; v++) { const signed char* sptr = img.row<const signed char>(u) + v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[2]; ptr[2] = sptr[4]; ptr[3] = sptr[6]; sptr += 8; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[2]; sptr += 4; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += 2; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt); }
omp_task.c
<ompts:test> <ompts:testdescription>Test the omp task directive. The idea of the tests is to generate a set of tasks in a single region. We pause the tasks generated so that other threads get scheduled to the newly opened tasks.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp task</ompts:directive> <ompts:dependences>omp single</ompts:dependences> <ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"

int <ompts:testcode:functionname>omp_task</ompts:testcode:functionname>(FILE * logFile){
    <ompts:orphan:vars>
    /* tids[i] records which thread executed task i; each task sleeps so
     * that other threads have a chance to pick up the remaining tasks. */
    int tids[NUM_TASKS];
    int i;
    </ompts:orphan:vars>

#pragma omp parallel
    {
#pragma omp single
        {
            for (i = 0; i < NUM_TASKS; i++) {
                <ompts:orphan>
                /* First we have to store the value of the loop index in a new variable
                 * which will be private for each task because otherwise it will be overwritten
                 * if the execution of the task takes longer than the time which is needed to
                 * enter the next step of the loop!
                 */
                int myi;
                myi = i;
                <ompts:check>#pragma omp task</ompts:check>
                {
                    my_sleep (SLEEPTIME);
                    tids[myi] = omp_get_thread_num();
                } /* end of omp task */
                </ompts:orphan>
            } /* end of for */
        } /* end of single */
    } /*end of parallel */
    /* The implicit barrier at the end of the parallel region guarantees
     * that all tasks have completed before tids is read below. */

    /* Now we check if more than one thread executed the tasks. */
    /* Return 1 (pass) as soon as two tasks ran on different threads;
     * return 0 (fail) if every task ran on the same thread. */
    for (i = 0; i < NUM_TASKS; i++) {
        if (tids[0] != tids[i])
            return 1;
    }
    return 0;
} /* end of omp_task */
</ompts:testcode> </ompts:test>
direct.c
#define ERRORTEST 0 #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <sys/time.h> #ifdef ENABLE_OPENMP #include <omp.h> #endif #include "gp5util.h" #define NJMAX (JMEMSIZE) // #define NJMAX 65536 #define rdtscll(val) do { \ unsigned int a,d; \ asm volatile("rdtsc" : "=a"(a), "=d"(d)); \ (val) = ((unsigned long)a) | (((unsigned long)d)<<32); \ } while(0) #if 1 #define GIGAHELTZ 3.8 double get_dtime(void){ struct timeval tv; gettimeofday(&tv, NULL); return ((double)(tv.tv_sec) + (double)(tv.tv_usec) * 0.001 * 0.001); } #endif void get_cputime(double *laptime, double *sprittime); void readnbody(int *nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname) { int i, dummy, fi; double dummyd; FILE *fp; fp = fopen(fname, "r"); if (fp == NULL) { perror("readnbody"); exit(1); } fi = fscanf(fp, "%d\n", nj); fi = fscanf(fp, "%d\n", &dummy); fi = fscanf(fp, "%lf\n", &dummyd); fi = fprintf(stderr, "nj: %d\n", *nj); for (i = 0; i < *nj; i++) { fi = fscanf(fp, "%lf\n", mj+i); } for (i = 0; i < *nj; i++) { fi = fscanf(fp, "%lf %lf %lf\n", xj[i]+0, xj[i]+1, xj[i]+2); } for (i = 0; i < *nj; i++) { fi = fscanf(fp, "%lf %lf %lf\n", vj[i]+0, vj[i]+1, vj[i]+2); } } void writenbody(int nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname) { int i; FILE *fp; fp = fopen(fname, "w"); fprintf(fp, "%d\n", nj); fprintf(fp, "%d\n", 3); fprintf(fp, "%e\n", 0.0); for (i = 0; i < nj; i++) { fprintf(fp, "%e\n", mj[i]); } for (i = 0; i < nj; i++) { fprintf(fp, "%e %e %e\n", xj[i][0], xj[i][1], xj[i][2]); } for (i = 0; i < nj; i++) { fprintf(fp, "%e %e %e\n", vj[i][0], vj[i][1], vj[i][2]); } } void calc_gravity(double *mj, double (*xj)[3], double (*vj)[3], double eps, double (*a)[3], double *p, int nj) { double epsinv; int i; double cycle; // g5_set_xj(0, nj, xj); // g5_set_mj(0, nj, mj); g5_set_xmj(0, nj, xj, mj); g5_set_eps_to_all(eps); g5_set_n(nj); double st1 = get_dtime(); g5_calculate_force_on_x(xj, a, p, nj); double st2 = get_dtime(); 
cycle = (double)(st2 - st1) * GIGAHELTZ * 1e9 / ((double)nj*(double)nj/4); #ifdef ENABLE_OPENMP #pragma omp parallel #if 1 { if(omp_get_thread_num() == 0) cycle *= omp_get_num_threads(); } #else cycle *= omp_get_num_threads(); #endif #endif printf("gravity %f cycle per loop\n", cycle); for (i = 0; i < nj; i++) { p[i] = -p[i]; } if (eps != 0.0) { epsinv = 1.0/eps; for (i = 0; i < nj; i++) { p[i] = p[i] + mj[i] * epsinv; } } } #ifdef SYMMETRIC void calc_gravity0(double *mj, double (*xj)[3], double (*vj)[3], double *epsj2, double (*a)[3], double *p, int nj) { double epsinv; int i; double cycle; g5_set_xmj0(0, nj, xj, mj, epsj2); g5_set_n(nj); double st1 = get_dtime(); g5_calculate_force_on_x0(xj, a, p, nj, epsj2); double st2 = get_dtime(); cycle = (double)(st2 - st1) * GIGAHELTZ * 1e9 / ((double)nj*(double)nj/4); #ifdef ENABLE_OPENMP #pragma omp parallel #if 1 { if(omp_get_thread_num() == 0) cycle *= omp_get_num_threads(); } #else cycle *= omp_get_num_threads(); #endif #endif printf("gravity %f cycle per loop\n", cycle); for (i = 0; i < nj; i++) { p[i] = -p[i]; if (epsj2[i] != 0.0) { epsinv = 1.0 / (sqrt(2.0) * sqrt(epsj2[i])); p[i] = p[i] + mj[i] * epsinv; } } } #endif void push_velocity(double (*vj)[3], double (*a)[3], double dt, int nj) { int j, k; for (j = 0; j < nj; j++) { for (k = 0; k < 3; k++) { vj[j][k] += dt * a[j][k]; } } } void push_position(double (*xj)[3], double (*vj)[3], double (*a)[3], double dt, int nj) { int j, k; for (j = 0; j < nj; j++) { for (k = 0; k < 3; k++) { xj[j][k] += dt * vj[j][k]; } } } void energy(double *mj, double (*vj)[3], double *p, int nj, double *ke, double *pe) { int i, k; *pe = 0; *ke = 0; for (i = 0; i < nj; i++) { *pe += mj[i] * p[i]; for (k = 0; k < 3; k++) { *ke += 0.5 * mj[i] * vj[i][k] * vj[i][k]; } } *pe /= 2.0; } int main(int argc, char **argv) { static double mj[NJMAX], xj[NJMAX][3], vj[NJMAX][3], epsj2[NJMAX]; static double a[NJMAX][3], p[NJMAX]; double xmax, xmin, mmin; double time; // double eps, dt, endt; double dt, 
endt; double e, e0, ke, pe; double LapTime, SpritTime, IntPerSec, Gflops; int nj; int nstep, step; dt = 0.01; endt = 10.0; time = 0.0; nstep = endt/dt; xmax = 10.0; xmin = -10.0; if (argc < 3) { fprintf(stderr, "usage: %s <infile> <outfile>\n", argv[0]); exit(1); } readnbody(&nj, mj, xj, vj, argv[1]); mmin = mj[0]; #if ERRORTEST == 1 double eps; eps = 4.0 / (double)nj; #else #ifdef SYMMETRIC int i; for(i = 0; i < nj; i++) epsj2[i] = (0.01 + 0.01 * (double)i / (double)nj) * (0.01 + 0.01 * (double)i / (double)nj); // mj[1021] = 1.0; // mj[1022] = 1.0; // mj[1023] = 1.0; #else double eps; eps = 0.02; #endif #endif g5_open(); g5_set_range(xmin, xmax, mmin); #ifdef SYMMETRIC calc_gravity0(mj, xj, vj, epsj2, a, p, nj); #else calc_gravity(mj, xj, vj, eps, a, p, nj); #endif energy(mj, vj, p, nj, &ke, &pe); e0 = ke+pe; #if ERRORTEST == 1 int i; char out[1024]; FILE *fp; sprintf(out, "pl%03dk_eps4n_avx.ap", nj / 1024); fp = fopen(out, "w"); for(i = 0; i < nj; i++) fprintf(fp, "%5d %+.16e %+.16e\n", i, sqrt(a[i][0]*a[i][0]+a[i][1]*a[i][1]+a[i][2]*a[i][2]), p[i]); fclose(fp); exit(0); #endif // TimeStart = (double)clock() / CLOCKS_PER_SEC; get_cputime(&LapTime, &SpritTime); for (step = 1; step < nstep; step++) { push_velocity(vj, a, 0.5*dt, nj); push_position(xj, vj, a, dt, nj); time = time + dt; #ifdef SYMMETRIC calc_gravity0(mj, xj, vj, epsj2, a, p, nj); #else calc_gravity(mj, xj, vj, eps, a, p, nj); #endif push_velocity(vj, a, 0.5*dt, nj); #ifdef ANIM plot_star(xj, nj, time, 0.3, mj, mj[0]); #endif /* ANIM */ if (step % (nstep/10) == 0) { energy(mj, vj, p, nj, &ke, &pe); e = ke+pe; // TimeEnd = (double)clock() / CLOCKS_PER_SEC; get_cputime(&LapTime, &SpritTime); IntPerSec = ((double)nj * (double)nj * (long)(nstep/10)) / LapTime; Gflops = IntPerSec * 38. 
* 1.e-9; printf("step: %d time: %e\n", step, time); printf("e: %e de: %e\n", e, e-e0); printf("ke: %e pe: %e\n", ke, pe); printf("ke/pe: %e\n\n", ke/pe); printf("%e interaction per sec, %f Gflops \n", IntPerSec, Gflops); // TimeStart = TimeEnd; } } g5_close(); writenbody(nj, mj, xj, vj, argv[2]); return 0; }
ParallelFor.h
// Copyright (c) 2004-2022 Tomáš Oberhuber et al. // // This file is part of TNL - Template Numerical Library (https://tnl-project.org/) // // SPDX-License-Identifier: MIT #pragma once #include <noa/3rdparty/tnl-noa/src/TNL/Devices/Sequential.h> #include <noa/3rdparty/tnl-noa/src/TNL/Devices/Host.h> #include <noa/3rdparty/tnl-noa/src/TNL/Devices/Cuda.h> #include <noa/3rdparty/tnl-noa/src/TNL/Cuda/CheckDevice.h> #include <noa/3rdparty/tnl-noa/src/TNL/Cuda/DeviceInfo.h> #include <noa/3rdparty/tnl-noa/src/TNL/Cuda/LaunchHelpers.h> #include <noa/3rdparty/tnl-noa/src/TNL/Cuda/KernelLaunch.h> #include <noa/3rdparty/tnl-noa/src/TNL/Math.h> /**** * The implementation of ParallelFor is not meant to provide maximum performance * at every cost, but maximum flexibility for operating with data stored on the * device. * * The grid-stride loop for CUDA has been inspired by Nvidia's blog post: * https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/ * * Implemented by: Jakub Klinkovsky */ namespace noa::TNL { /** * \brief Namespace for fundamental TNL algorithms * * It contains algorithms like for-loops, memory operations, (parallel) reduction, * multireduction, scan etc. */ namespace Algorithms { // TODO: ParallelForMode should be moved to Device (=Executor) /** * \brief Enum for the parallel processing of the for-loop. * * Synchronous means that the program control returns to the caller when the loop is processed completely. * Asynchronous means that the program control returns to the caller immediately even before the loop is processing is finished. * * Only parallel for-loops in CUDA are affected by this mode. */ enum ParallelForMode { SynchronousMode, AsynchronousMode }; /** * \brief Parallel for loop for one dimensional interval of indices. * * \tparam Device specifies the device where the for-loop will be executed. * It can be \ref TNL::Devices::Host, \ref TNL::Devices::Cuda or * \ref TNL::Devices::Sequential. 
* \tparam Mode defines synchronous/asynchronous mode on parallel devices. */ template< typename Device = Devices::Sequential, ParallelForMode Mode = SynchronousMode > struct ParallelFor { /** * \brief Static method for the execution of the loop. * * \tparam Index is the type of the loop indices. * \tparam Function is the type of the functor to be called in each iteration * (it is usually deduced from the argument used in the function call). * \tparam FunctionArgs is a variadic pack of types for additional parameters * that are forwarded to the functor in every iteration. * * \param start is the left bound of the iteration range `[begin, end)`. * \param end is the right bound of the iteration range `[begin, end)`. * \param f is the function to be called in each iteration. * \param args are additional parameters to be passed to the function f. * * \par Example * \include Algorithms/ParallelForExample.cpp * \par Output * \include ParallelForExample.out * */ template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index start, Index end, Function f, FunctionArgs... args ) { for( Index i = start; i < end; i++ ) f( i, args... ); } }; /** * \brief Parallel for loop for two dimensional domain of indices. * * \tparam Device specifies the device where the for-loop will be executed. * It can be \ref TNL::Devices::Host, \ref TNL::Devices::Cuda or * \ref TNL::Devices::Sequential. * \tparam Mode defines synchronous/asynchronous mode on parallel devices. */ template< typename Device = Devices::Sequential, ParallelForMode Mode = SynchronousMode > struct ParallelFor2D { /** * \brief Static method for the execution of the loop. * * \tparam Index is the type of the loop indices. * \tparam Function is the type of the functor to be called in each iteration * (it is usually deduced from the argument used in the function call). 
* \tparam FunctionArgs is a variadic pack of types for additional parameters * that are forwarded to the functor in every iteration. * * \param startX the for-loop iterates over index domain `[startX,endX) x [startY,endY)`. * \param startY the for-loop iterates over index domain `[startX,endX) x [startY,endY)`. * \param endX the for-loop iterates over index domain `[startX,endX) x [startY,endY)`. * \param endY the for-loop iterates over index domain `[startX,endX) x [startY,endY)`. * \param f is the function to be called in each iteration * \param args are additional parameters to be passed to the function f. * * The function f is called for each iteration as * * \code * f( i, j, args... ) * \endcode * * where the first parameter is changing more often than the second one. * * \par Example * \include Algorithms/ParallelForExample-2D.cpp * \par Output * \include ParallelForExample-2D.out * */ template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index startX, Index startY, Index endX, Index endY, Function f, FunctionArgs... args ) { for( Index j = startY; j < endY; j++ ) for( Index i = startX; i < endX; i++ ) f( i, j, args... ); } }; /** * \brief Parallel for loop for three dimensional domain of indices. * * \tparam Device specifies the device where the for-loop will be executed. * It can be \ref TNL::Devices::Host, \ref TNL::Devices::Cuda or * \ref TNL::Devices::Sequential. * \tparam Mode defines synchronous/asynchronous mode on parallel devices. */ template< typename Device = Devices::Sequential, ParallelForMode Mode = SynchronousMode > struct ParallelFor3D { /** * \brief Static method for the execution of the loop. * * \tparam Index is the type of the loop indices. * \tparam Function is the type of the functor to be called in each iteration * (it is usually deduced from the argument used in the function call). 
* \tparam FunctionArgs is a variadic pack of types for additional parameters * that are forwarded to the functor in every iteration. * * \param startX the for-loop iterates over index domain `[startX,endX) x [startY,endY) x [startZ,endZ)`. * \param startY the for-loop iterates over index domain `[startX,endX) x [startY,endY) x [startZ,endZ)`. * \param startZ the for-loop iterates over index domain `[startX,endX) x [startY,endY) x [startZ,endZ)`. * \param endX the for-loop iterates over index domain `[startX,endX) x [startY,endY) x [startZ,endZ)`. * \param endY the for-loop iterates over index domain `[startX,endX) x [startY,endY) x [startZ,endZ)`. * \param endZ the for-loop iterates over index domain `[startX,endX) x [startY,endY) x [startZ,endZ)`. * \param f is the function to be called in each iteration * \param args are additional parameters to be passed to the function f. * * The function f is called for each iteration as * * \code * f( i, j, k, args... ) * \endcode * * where the first parameter is changing the most often. * * \par Example * \include Algorithms/ParallelForExample-3D.cpp * \par Output * \include ParallelForExample-3D.out * */ template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index startX, Index startY, Index startZ, Index endX, Index endY, Index endZ, Function f, FunctionArgs... args ) { for( Index k = startZ; k < endZ; k++ ) for( Index j = startY; j < endY; j++ ) for( Index i = startX; i < endX; i++ ) f( i, j, k, args... ); } }; template< ParallelForMode Mode > struct ParallelFor< Devices::Host, Mode > { template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index start, Index end, Function f, FunctionArgs... 
args ) { #ifdef HAVE_OPENMP // Benchmarks show that this is significantly faster compared // to '#pragma omp parallel for if( Devices::Host::isOMPEnabled() && end - start > 512 )' if( Devices::Host::isOMPEnabled() && end - start > 512 ) { #pragma omp parallel for for( Index i = start; i < end; i++ ) f( i, args... ); } else ParallelFor< Devices::Sequential >::exec( start, end, f, args... ); #else ParallelFor< Devices::Sequential >::exec( start, end, f, args... ); #endif } }; template< ParallelForMode Mode > struct ParallelFor2D< Devices::Host, Mode > { template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index startX, Index startY, Index endX, Index endY, Function f, FunctionArgs... args ) { #ifdef HAVE_OPENMP // Benchmarks show that this is significantly faster compared // to '#pragma omp parallel for if( Devices::Host::isOMPEnabled() )' if( Devices::Host::isOMPEnabled() ) { #pragma omp parallel for for( Index j = startY; j < endY; j++ ) for( Index i = startX; i < endX; i++ ) f( i, j, args... ); } else ParallelFor2D< Devices::Sequential >::exec( startX, startY, endX, endY, f, args... ); #else ParallelFor2D< Devices::Sequential >::exec( startX, startY, endX, endY, f, args... ); #endif } }; template< ParallelForMode Mode > struct ParallelFor3D< Devices::Host, Mode > { template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index startX, Index startY, Index startZ, Index endX, Index endY, Index endZ, Function f, FunctionArgs... args ) { #ifdef HAVE_OPENMP // Benchmarks show that this is significantly faster compared // to '#pragma omp parallel for if( Devices::Host::isOMPEnabled() )' if( Devices::Host::isOMPEnabled() ) { #pragma omp parallel for collapse(2) for( Index k = startZ; k < endZ; k++ ) for( Index j = startY; j < endY; j++ ) for( Index i = startX; i < endX; i++ ) f( i, j, k, args... ); } else ParallelFor3D< Devices::Sequential >::exec( startX, startY, startZ, endX, endY, endZ, f, args... 
); #else ParallelFor3D< Devices::Sequential >::exec( startX, startY, startZ, endX, endY, endZ, f, args... ); #endif } }; template< bool gridStrideX = true, typename Index, typename Function, typename... FunctionArgs > __global__ void ParallelForKernel( Index start, Index end, Function f, FunctionArgs... args ) { #ifdef HAVE_CUDA Index i = start + blockIdx.x * blockDim.x + threadIdx.x; while( i < end ) { f( i, args... ); if( gridStrideX ) i += blockDim.x * gridDim.x; else break; } #endif } template< bool gridStrideX = true, bool gridStrideY = true, typename Index, typename Function, typename... FunctionArgs > __global__ void ParallelFor2DKernel( Index startX, Index startY, Index endX, Index endY, Function f, FunctionArgs... args ) { #ifdef HAVE_CUDA Index j = startY + blockIdx.y * blockDim.y + threadIdx.y; Index i = startX + blockIdx.x * blockDim.x + threadIdx.x; while( j < endY ) { while( i < endX ) { f( i, j, args... ); if( gridStrideX ) i += blockDim.x * gridDim.x; else break; } if( gridStrideY ) j += blockDim.y * gridDim.y; else break; } #endif } template< bool gridStrideX = true, bool gridStrideY = true, bool gridStrideZ = true, typename Index, typename Function, typename... FunctionArgs > __global__ void ParallelFor3DKernel( Index startX, Index startY, Index startZ, Index endX, Index endY, Index endZ, Function f, FunctionArgs... args ) { #ifdef HAVE_CUDA Index k = startZ + blockIdx.z * blockDim.z + threadIdx.z; Index j = startY + blockIdx.y * blockDim.y + threadIdx.y; Index i = startX + blockIdx.x * blockDim.x + threadIdx.x; while( k < endZ ) { while( j < endY ) { while( i < endX ) { f( i, j, k, args... ); if( gridStrideX ) i += blockDim.x * gridDim.x; else break; } if( gridStrideY ) j += blockDim.y * gridDim.y; else break; } if( gridStrideZ ) k += blockDim.z * gridDim.z; else break; } #endif } template< ParallelForMode Mode > struct ParallelFor< Devices::Cuda, Mode > { template< typename Index, typename Function, typename... 
FunctionArgs > static void exec( Index start, Index end, Function f, FunctionArgs... args ) { if( end <= start ) return; Cuda::LaunchConfiguration launch_config; launch_config.blockSize.x = 256; launch_config.gridSize.x = TNL::min( Cuda::getMaxGridSize(), Cuda::getNumberOfBlocks( end - start, launch_config.blockSize.x ) ); constexpr bool synchronous = Mode == SynchronousMode; if( (std::size_t) launch_config.blockSize.x * launch_config.gridSize.x >= (std::size_t) end - start ) { constexpr auto kernel = ParallelForKernel< false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, start, end, f, args... ); } else { // decrease the grid size and align to the number of multiprocessors const int desGridSize = 32 * Cuda::DeviceInfo::getCudaMultiprocessors( Cuda::DeviceInfo::getActiveDevice() ); launch_config.gridSize.x = TNL::min( desGridSize, Cuda::getNumberOfBlocks( end - start, launch_config.blockSize.x ) ); constexpr auto kernel = ParallelForKernel< true, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, start, end, f, args... ); } } }; template< ParallelForMode Mode > struct ParallelFor2D< Devices::Cuda, Mode > { template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index startX, Index startY, Index endX, Index endY, Function f, FunctionArgs... 
args ) { if( endX <= startX || endY <= startY ) return; const Index sizeX = endX - startX; const Index sizeY = endY - startY; Cuda::LaunchConfiguration launch_config; if( sizeX >= sizeY * sizeY ) { launch_config.blockSize.x = TNL::min( 256, sizeX ); launch_config.blockSize.y = 1; } else if( sizeY >= sizeX * sizeX ) { launch_config.blockSize.x = 1; launch_config.blockSize.y = TNL::min( 256, sizeY ); } else { launch_config.blockSize.x = TNL::min( 32, sizeX ); launch_config.blockSize.y = TNL::min( 8, sizeY ); } launch_config.gridSize.x = TNL::min( Cuda::getMaxGridSize(), Cuda::getNumberOfBlocks( sizeX, launch_config.blockSize.x ) ); launch_config.gridSize.y = TNL::min( Cuda::getMaxGridSize(), Cuda::getNumberOfBlocks( sizeY, launch_config.blockSize.y ) ); constexpr bool synchronous = Mode == SynchronousMode; dim3 gridCount; gridCount.x = roundUpDivision( sizeX, launch_config.blockSize.x * launch_config.gridSize.x ); gridCount.y = roundUpDivision( sizeY, launch_config.blockSize.y * launch_config.gridSize.y ); if( gridCount.x == 1 && gridCount.y == 1 ) { constexpr auto kernel = ParallelFor2DKernel< false, false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, endX, endY, f, args... ); } else if( gridCount.x == 1 && gridCount.y > 1 ) { constexpr auto kernel = ParallelFor2DKernel< false, true, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, endX, endY, f, args... ); } else if( gridCount.x > 1 && gridCount.y == 1 ) { constexpr auto kernel = ParallelFor2DKernel< true, false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, endX, endY, f, args... ); } else { constexpr auto kernel = ParallelFor2DKernel< true, true, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, endX, endY, f, args... 
); } } }; template< ParallelForMode Mode > struct ParallelFor3D< Devices::Cuda, Mode > { template< typename Index, typename Function, typename... FunctionArgs > static void exec( Index startX, Index startY, Index startZ, Index endX, Index endY, Index endZ, Function f, FunctionArgs... args ) { if( endX <= startX || endY <= startY || endZ <= startZ ) return; const Index sizeX = endX - startX; const Index sizeY = endY - startY; const Index sizeZ = endZ - startZ; Cuda::LaunchConfiguration launch_config; if( sizeX >= sizeY * sizeY * sizeZ * sizeZ ) { launch_config.blockSize.x = TNL::min( 256, sizeX ); launch_config.blockSize.y = 1; launch_config.blockSize.z = 1; } else if( sizeY >= sizeX * sizeX * sizeZ * sizeZ ) { launch_config.blockSize.x = 1; launch_config.blockSize.y = TNL::min( 256, sizeY ); launch_config.blockSize.z = 1; } else if( sizeZ >= sizeX * sizeX * sizeY * sizeY ) { launch_config.blockSize.x = TNL::min( 2, sizeX ); launch_config.blockSize.y = TNL::min( 2, sizeY ); // CUDA allows max 64 for launch_config.blockSize.z launch_config.blockSize.z = TNL::min( 64, sizeZ ); } else if( sizeX >= sizeZ * sizeZ && sizeY >= sizeZ * sizeZ ) { launch_config.blockSize.x = TNL::min( 32, sizeX ); launch_config.blockSize.y = TNL::min( 8, sizeY ); launch_config.blockSize.z = 1; } else if( sizeX >= sizeY * sizeY && sizeZ >= sizeY * sizeY ) { launch_config.blockSize.x = TNL::min( 32, sizeX ); launch_config.blockSize.y = 1; launch_config.blockSize.z = TNL::min( 8, sizeZ ); } else if( sizeY >= sizeX * sizeX && sizeZ >= sizeX * sizeX ) { launch_config.blockSize.x = 1; launch_config.blockSize.y = TNL::min( 32, sizeY ); launch_config.blockSize.z = TNL::min( 8, sizeZ ); } else { launch_config.blockSize.x = TNL::min( 16, sizeX ); launch_config.blockSize.y = TNL::min( 4, sizeY ); launch_config.blockSize.z = TNL::min( 4, sizeZ ); } launch_config.gridSize.x = TNL::min( Cuda::getMaxGridSize(), Cuda::getNumberOfBlocks( sizeX, launch_config.blockSize.x ) ); launch_config.gridSize.y = 
TNL::min( Cuda::getMaxGridSize(), Cuda::getNumberOfBlocks( sizeY, launch_config.blockSize.y ) ); launch_config.gridSize.z = TNL::min( Cuda::getMaxGridSize(), Cuda::getNumberOfBlocks( sizeZ, launch_config.blockSize.z ) ); constexpr bool synchronous = Mode == SynchronousMode; dim3 gridCount; gridCount.x = roundUpDivision( sizeX, launch_config.blockSize.x * launch_config.gridSize.x ); gridCount.y = roundUpDivision( sizeY, launch_config.blockSize.y * launch_config.gridSize.y ); gridCount.z = roundUpDivision( sizeZ, launch_config.blockSize.z * launch_config.gridSize.z ); if( gridCount.x == 1 && gridCount.y == 1 && gridCount.z == 1 ) { constexpr auto kernel = ParallelFor3DKernel< false, false, false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else if( gridCount.x == 1 && gridCount.y == 1 && gridCount.z > 1 ) { constexpr auto kernel = ParallelFor3DKernel< false, false, true, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else if( gridCount.x == 1 && gridCount.y > 1 && gridCount.z == 1 ) { constexpr auto kernel = ParallelFor3DKernel< false, true, false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else if( gridCount.x > 1 && gridCount.y == 1 && gridCount.z == 1 ) { constexpr auto kernel = ParallelFor3DKernel< true, false, false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else if( gridCount.x == 1 && gridCount.y > 1 && gridCount.z > 1 ) { constexpr auto kernel = ParallelFor3DKernel< false, true, true, Index, Function, FunctionArgs... 
>; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else if( gridCount.x > 1 && gridCount.y > 1 && gridCount.z == 1 ) { constexpr auto kernel = ParallelFor3DKernel< true, true, false, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else if( gridCount.x > 1 && gridCount.y == 1 && gridCount.z > 1 ) { constexpr auto kernel = ParallelFor3DKernel< true, false, true, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } else { constexpr auto kernel = ParallelFor3DKernel< true, true, true, Index, Function, FunctionArgs... >; Cuda::launchKernel< synchronous >( kernel, 0, launch_config, startX, startY, startZ, endX, endY, endZ, f, args... ); } } }; } // namespace Algorithms } // namespace noa::TNL
omp-fibonacci.c
#include <stdio.h>
#include <stdlib.h> /* atoi — was missing, making its use an implicit declaration */
#include <omp.h>

/*
 * Naive recursive Fibonacci using OpenMP tasks: each recursive call is
 * spawned as a task and the parent waits on both children before summing.
 *
 * Returns fib(n) with fib(0) = 0 and fib(1) = 1.
 */
int f(int n)
{
    int i, j;

    if (n < 2)
        return n;
    /* i and j are shared so the child tasks can write the results that this
     * task reads after the taskwait; n is firstprivate so each task captures
     * its value at spawn time. */
#pragma omp task shared(i) firstprivate(n)
    i = f(n - 1);
#pragma omp task shared(j) firstprivate(n)
    j = f(n - 2);
#pragma omp taskwait
    return i + j;
}

int main(int argc, char *argv[])
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s n\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    /* Create the thread team; a single thread starts the top-level call and
     * the others pick up the tasks spawned inside f(). */
#pragma omp parallel shared(n) num_threads(4)
    {
#pragma omp single
        printf("f(%d) = %d\n", n, f(n));
    }
    return 0;
}
opencl_office2007_fmt_plug.c
/* MS Office 2007 cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> * * OpenCL support by magnum. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum and it is hereby released to the general public * under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_office2007; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_office2007); #else #include "sha.h" #include <openssl/aes.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "unicode.h" #include "common-opencl.h" #include "config.h" #define PLAINTEXT_LENGTH 51 #define UNICODE_LENGTH 104 /* In octets, including 0x80 */ #define FORMAT_LABEL "office2007-opencl" #define FORMAT_NAME "MS Office 2007" #define OCL_ALGORITHM_NAME "SHA1 OpenCL" #define CPU_ALGORITHM_NAME " AES" #define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME #define BENCHMARK_COMMENT " (50,000 iterations)" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_LENGTH 16 #define SALT_SIZE sizeof(*cur_salt) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) static struct fmt_tests tests[] = { {"$office$*2007*20*128*16*8b2c9e8c878844fc842012273be4bea8*aa862168b80d8c45c852696a8bb499eb*a413507fabe2d87606595f987f679ff4b5b4c2cd", "Password"}, /* 2007-Default_myhovercraftisfullofeels_.docx */ {"$office$*2007*20*128*16*91f095a1fd02595359fe3938fa9236fd*e22668eb1347957987175079e980990f*659f50b9062d36999bf3d0911068c93268ae1d86", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.dotx */ {"$office$*2007*20*128*16*56ea65016fbb4eac14a6770b2dbe7e99*8cf82ce1b62f01fd3b2c7666a2313302*21443fe938177e648c482da72212a8848c2e9c80", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xlsb */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*3a040a9cef3d3675009b22f99718e39c*48053b27e95fa53b3597d48ca4ad41eec382e0c8", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xlsm */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*92bb2ef34ca662ca8a26c8e2105b05c0*0261ba08cd36a324aa1a70b3908a24e7b5a89dd6", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xlsx */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*46bef371486919d4bffe7280110f913d*b51af42e6696baa097a7109cebc3d0ff7cc8b1d8", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xltx */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*1addb6823689aca9ce400be8f9e55fc9*e06bf10aaf3a4049ffa49dd91cf9e7bbf88a1b3b", "myhovercraftisfullofeels"}, {NULL} }; static struct custom_salt { char unsigned osalt[SALT_LENGTH]; char unsigned encryptedVerifier[16]; char unsigned encryptedVerifierHash[32]; int version; int verifierHashSize; int keySize; int saltSize; } *cur_salt; static int *cracked, any_cracked; static unsigned int v_width = 1; /* Vector width of kernel */ static char *saved_key; /* Password encoded in UCS-2 */ static int *saved_len; /* UCS-2 password length, in octets */ static char *saved_salt; static unsigned char *key; /* Output key from kernel */ 
static int new_keys; static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key; static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key; static cl_kernel GenerateSHA1pwhash, Generate2007key; #define HASH_LOOPS 500 /* Lower figure gives less X hogging */ #define ITERATIONS 50000 #define STEP 0 #define SEED 128 #define OCL_CONFIG "office2007" static const char * warn[] = { "xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: " }; static int split_events[] = { 3, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { size_t s; s = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA1pwhash); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel)); s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, Generate2007key)); return s; } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 
8 : 1; else return 64; } static void create_clobj(size_t gws, struct fmt_main *self) { int i; int bench_len = strlen(tests[0].plaintext) * 2; gws *= v_width; pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key"); memset(saved_key, 0, UNICODE_LENGTH * gws); pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len"); for (i = 0; i < gws; i++) saved_len[i] = bench_len; pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt"); 
memset(saved_salt, 0, SALT_LENGTH); cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 6 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device state buffer"); pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 16 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 16 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 16 * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory key"); memset(key, 0, 16 * gws); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2"); HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(Generate2007key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(Generate2007key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1"); cracked = mem_alloc(sizeof(*cracked) * gws); } static void release_clobj(void) { HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, 
/* NOTE(review): this chunk begins inside release_clobj(); the opening lines of
 * that function (unmapping of the first pinned buffers) are outside the
 * visible range.  The trailing statements below unmap the remaining pinned
 * host buffers and release every OpenCL memory object this format created. */
NULL, NULL), "Error Unmapping saved_len");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt");
	HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");
	HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer");
	MEM_FREE(cracked);
}

/* Format teardown: release OpenCL buffers, kernels and the program. */
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(GenerateSHA1pwhash), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(Generate2007key), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}

/* Zero the whole host-side key buffer and per-key length array so stale
 * plaintexts from the previous batch can never leak into this one. */
static void clear_keys(void)
{
	memset(saved_key, 0, UNICODE_LENGTH * global_work_size * v_width);
	memset(saved_len, 0, sizeof(*saved_len) * global_work_size * v_width);
}

/* Store one candidate plaintext at slot 'index', converted to UTF-16LE and
 * pre-padded for the GPU (0x80 terminator appended, length doubled to bytes). */
static void set_key(char *key, int index)
{
	UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH];

	/* convert key to UTF-16LE */
	saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	/* negative return means the input was truncated; fall back to the
	 * length of what actually landed in the buffer */
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(utfkey);

	/* Prepare for GPU */
	utfkey[saved_len[index]] = 0x80;

	/* length in UTF-16 code units -> length in bytes */
	saved_len[index] <<= 1;

	new_keys = 1;
	//dump_stuff_msg("key buffer", &saved_key[index*UNICODE_LENGTH], UNICODE_LENGTH);
}

/* Parse a "$office$*version*verifierHashSize*keySize*saltSize*salt*verifier*
 * verifierHash" ciphertext into a custom_salt.  Returns a pointer to a
 * tiny-allocated salt record owned by the memory pool (caller must not free). */
static void *get_salt(char *ciphertext)
{
	int i, length;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy, *p;
	cur_salt = mem_calloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	ctcopy += 9;	/* skip over "$office$*" */
	p = strtok(ctcopy, "*");
	cur_salt->version = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->verifierHashSize = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->keySize = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->saltSize = atoi(p);
	if (cur_salt->saltSize > SALT_LENGTH) {
		fprintf(stderr, "** error: salt longer than supported:\n%s\n", ciphertext);
		cur_salt->saltSize = SALT_LENGTH; /* will not work, but protects us from segfault */
	}
	p = strtok(NULL, "*");
	/* hex-decode salt, verifier and verifier hash */
	for (i = 0; i < cur_salt->saltSize; i++)
		cur_salt->osalt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < 16; i++)
		cur_salt->encryptedVerifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	length = strlen(p) / 2;
	for (i = 0; i < length; i++)
		cur_salt->encryptedVerifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)cur_salt;
}

/* Make 'salt' the active salt and push it to the device (asynchronously;
 * a later blocking call on the same in-order queue orders it). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH);
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
}

/* Forward declarations: init() temporarily swaps crypt_all for the
 * benchmark variant while auto-tuning. */
static int crypt_all(int *pcount, struct db_salt *salt);
static int crypt_all_benchmark(int *pcount, struct db_salt *salt);

/* One-time format setup: pick vector width, build the OpenCL program,
 * create the three kernels and auto-tune LWS/GWS. */
static void init(struct fmt_main *self)
{
	char build_opts[64];
	static char valgo[32] = "";

	if ((v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int))) > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo), OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, v_width);
		self->params.algorithm_name = valgo;
	}

	snprintf(build_opts, sizeof(build_opts), "-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u", HASH_LOOPS, UNICODE_LENGTH, v_width);
	opencl_init("$JOHN/kernels/office2007_kernel.cl", gpu_id, build_opts);

	// create kernel to execute
	GenerateSHA1pwhash = clCreateKernel(program[gpu_id], "GenerateSHA1pwhash", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	Generate2007key = clCreateKernel(program[gpu_id], "Generate2007key", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 3, self, create_clobj, release_clobj, UNICODE_LENGTH, 0);

	// Auto tune execution from shared/included code.
	self->methods.crypt_all = crypt_all_benchmark;
	autotune_run(self, ITERATIONS + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	self->methods.crypt_all = crypt_all;

	self->params.min_keys_per_crypt = local_work_size * v_width;
	self->params.max_keys_per_crypt = global_work_size * v_width;

	/* UTF-8 plaintexts may use up to 3 bytes per UTF-16 code unit */
	if (pers_opts.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}

/* True iff 'q' consists solely of hex digits up to its NUL.
 * NOTE(review): an empty string also returns true here. */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validate a candidate ciphertext's field structure without mutating it
 * (works on a strdup'd copy; strtok destroys the copy). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;

	if (strncmp(ciphertext, "$office$*2007*", 14))
		return 0;
	if (!(ctcopy = strdup(ciphertext))) {
		fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL);
		return 0;
	}
	keeptr = ctcopy;
	ctcopy += 15;
	if (!(ptr = strtok(ctcopy, "*"))) /* hash size or iterations */
		goto error;
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strncmp(ptr, "128", 3) && strncmp(ptr, "256", 3)) /* key size */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt size */
		goto error;
	res = atoi(ptr);
	if (res != 16) /* can we handle other values? */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt */
		goto error;
	if (strlen(ptr) != res * 2)
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encrypted verifier */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encrypted verifier hash */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (strlen(ptr) > 64)
		goto error;
	if ((ptr = strtok(NULL, "*"))) /* no trailing fields allowed */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}

/* CPU-side check: decrypt verifier and verifier-hash with the candidate AES
 * key, then confirm SHA1(verifier) matches the decrypted hash.  Returns 1 on
 * a password match, 0 otherwise. */
static inline int PasswordVerifier(unsigned char *key)
{
	unsigned char decryptedVerifier[16];
	unsigned char decryptedVerifierHash[16];
	AES_KEY akey;
	SHA_CTX ctx;
	unsigned char checkHash[20];

	memset(&akey, 0, sizeof(AES_KEY));
	if(AES_set_decrypt_key(key, 128, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed!\n");
		return 0;
	}
	AES_ecb_encrypt(cur_salt->encryptedVerifier, decryptedVerifier, &akey, AES_DECRYPT);
	/* fresh key schedule for the second block */
	memset(&akey, 0, sizeof(AES_KEY));
	if(AES_set_decrypt_key(key, 128, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed!\n");
		return 0;
	}
	AES_ecb_encrypt(cur_salt->encryptedVerifierHash, decryptedVerifierHash, &akey, AES_DECRYPT);

	/* find SHA1 hash of decryptedVerifier */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, decryptedVerifier, 16);
	SHA1_Final(checkHash, &ctx);

	return !memcmp(checkHash, decryptedVerifierHash, 16);
}

/* Main crack loop: upload new keys if any, run the SHA1-prehash kernel, the
 * 50000-iteration hash loop (HASH_LOOPS per kernel launch), then derive the
 * AES keys, read them back and verify each on the CPU. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;
	size_t gws, scalar_gws;

	/* round the vector-kernel global size up to a whole number of workgroups */
	gws = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = gws * v_width;

	if (any_cracked) {
		memset(cracked, 0, count * sizeof(*cracked));
		any_cracked = 0;
	}

	if (new_keys) {
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_key");
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_len");
		new_keys = 0;
	}

	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, firstEvent), "failed in clEnqueueNDRangeKernel");

	for (index = 0; index < 50000 / HASH_LOOPS; index++) {
		HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel");
		/* finish each slice so the watchdog doesn't trip and aborts stay responsive */
		HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2007key, 1, NULL, &gws, &local_work_size, 0, NULL, lastEvent), "failed in clEnqueueNDRangeKernel");

	// read back aes key
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 16 * scalar_gws, key, 0, NULL, NULL), "failed in reading key back");

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (PasswordVerifier(&key[index*16])) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}

/* Auto-tune variant of crypt_all: same pipeline, but events are attached to
 * the profiling slots and only two loop-kernel launches are sampled (one
 * unprofiled warm-up plus one timed).  No CPU verification. */
static int crypt_all_benchmark(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	size_t gws, scalar_gws;

	gws = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = gws * v_width;

	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key");
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len");

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel");

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel");

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2007key, 1, NULL, &gws, &local_work_size, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel");

	// read back aes key
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 16 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back");

	return count;
}

/* Any key in the batch cracked? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Was this specific index cracked? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* CPU verification already ran in crypt_all, nothing further to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Reconstruct the plaintext for slot 'index' from the UTF-16 buffer
 * (saved_len is in bytes, hence the >> 1 for the terminator position). */
static char *get_key(int index)
{
	UTF16 buf[PLAINTEXT_LENGTH + 1];
	memcpy(buf, &saved_key[index * UNICODE_LENGTH], saved_len[index]);
	buf[saved_len[index] >> 1] = 0;
	return (char*)utf16_to_enc(buf);
}

/* Format registration: parameters and method table for the JtR core. */
struct fmt_main fmt_opencl_office2007 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GB_unop__identity_bool_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_bool_int32) // op(A') function: GB (_unop_tran__identity_bool_int32) // C type: bool // A type: int32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ bool z = (bool) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ bool z = (bool) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_bool_int32) ( bool *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; bool z = (bool) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; bool z = (bool) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_bool_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
detector.c
#include "darknet.h"

/* Mapping from the contiguous class index used by the network (0..79) to the
 * sparse official COCO category ids expected in result JSON. */
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};

/*
 * Train a detection network, optionally on multiple GPUs.
 * datacfg/cfgfile/weightfile: data config, network config, optional weights.
 * gpus/ngpus: GPU id list and count; clear: reset the *seen* counter.
 * Writes periodic .backup and .weights snapshots to the backup directory.
 */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    /* FIX: allocate ngpus pointers; the original used sizeof(network)
     * (the struct, not the pointer), over-allocating the array */
    network **nets = calloc(ngpus, sizeof *nets);

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        /* same seed per GPU so all replicas start identically */
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];

    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    args.threads = 64;

    pthread_t load_thread = load_data(args);
    double time;
    int count = 0;
    while(get_current_batch(net) < net->max_batches){
        if(l.random && count++%10 == 0){
            /* multi-scale training: every 10 batches pick a new square input
             * size in [320, 608] (multiples of 32); lock to 608 near the end */
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            /* discard the batch that was loaded at the old size and
             * start loading one at the new size */
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        /* FIX: cast explicitly to long so the %ld specifier matches the
         * argument regardless of get_current_batch()'s integer return type */
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", (long)get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}

/* Extract the numeric image id from a COCO-style path such as
 * ".../COCO_val2014_000000001234.jpg" (digits after the last '_' or '/'). */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    return atoi(p+1);
}

/* Append detections for one image to a COCO-format JSON results file.
 * Boxes are clamped to the image and converted to [x,y,w,h]. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int i, j;
    int image_id = get_coco_image_id(image_path);
    for(i = 0; i < num_boxes; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}

/* Write detections in Pascal-VOC evaluation format: one file per class,
 * "id score xmin ymin xmax ymax" per line, 1-based pixel coordinates. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax);
        }
    }
}

/* Write detections in ImageNet detection format:
 * "image_id class score xmin ymin xmax ymax" (class ids are 1-based). */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            int class = j;
            if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax);
        }
    }
}

/* Validation pass with horizontal-flip test-time augmentation: each image is
 * fed twice (original + mirrored) through a batch-2 network, and results are
 * written in coco/imagenet/voc format depending on the "eval" option. */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2);  /* two slots: image and its mirror */
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    /* NOTE(review): fopen results below are not checked; a bad results
     * prefix leads to fprintf on NULL — consider validating */
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    /* pipelined loading: nthreads images in flight while the previous
     * nthreads are being predicted */
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    image input = make_image(net->w, net->h, net->c*2);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            /* pack image and its horizontal mirror into one 2x-channel input */
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* back up over the trailing ",\n" before closing the JSON array */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Standard validation pass (no augmentation): run the network over the
 * "valid" image list and dump detections in coco/imagenet/voc format. */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    /* NOTE(review): fopen results below are not checked (same as the flip
     * variant) — consider validating the results prefix */
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Measure proposal recall and average IOU against COCO 5k ground truth:
 * for each ground-truth box, find the best-IOU detection above threshold. */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];

    int j, k;

    int m = plist->size;
    int i=0;

    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;

    int total = 0;
    int correct = 0;
    int proposals = 0;
    float avg_iou = 0;

    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);

        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);

        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            /* FIX: iterate over the nboxes detections actually returned by
             * get_network_boxes; the original looped l.w*l.h*l.n times, which
             * can read past the end of dets when the counts differ */
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }

        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}

/* Interactive/single-image detection: load an image (or prompt for paths),
 * optionally mirror it, run the network, draw boxes, and save/show output. */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen, int hflip)
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            /* FIX: strncpy does not guarantee NUL-termination when the
             * source is >= 256 chars; terminate explicitly */
            strncpy(input, filename, 256);
            input[255] = '\0';
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        if (hflip) {
            flip_image(im);
        }
        image sized = letterbox_image(im, net->w, net->h);
        layer l = net->layers[net->n-1];

        float *X = sized.data;
        time=what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, hflip);
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            make_window("predictions", 512, 512, 0);
            show_image(im, "predictions", 0);
#endif
        }

        free_image(im);
        free_image(sized);
        if (filename) break;
    }
}

/* Removed: large commented-out censor_detector/extract_detector/
 * network_detect experiments (dead code; recover from version control
 * if ever needed again). */

/* Command-line entry point: dispatch to train/test/valid/recall/demo based
 * on argv[2], after parsing the shared option flags. */
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int hflip = find_int_arg(argc, argv, "-hflip", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 7) ? argv[7]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen, hflip);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
}
Functions.h
// // smarties // Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved. // Distributed under the terms of the MIT license. // // Created by Guido Novati (novatig@ethz.ch). // #ifndef smarties_Function_h #define smarties_Function_h #include "../../Utils/FunctionUtilities.h" #include "../../Utils/Warnings.h" #include <memory> #ifndef PRELU_FAC #define PRELU_FAC 0.1 #endif //List of non-linearities for neural networks //- eval return f(in), also present as array in / array out //- evalDiff returns f'(x) //- initFactor: some prefer fan in fan out, some only fan-in dependency //If adding a new function, edit function readFunction at end of file namespace smarties { struct Function { //weights are initialized with uniform distrib [-weightsInitFactor, weightsInitFactor] virtual Real initFactor(const Uint inps, const Uint outs) const = 0; virtual void eval(const nnReal*const in, nnReal*const out, const Uint N) const = 0; // f(in) virtual nnReal eval(const nnReal in) const = 0; virtual nnReal inverse(const nnReal in) const = 0; // f(in) virtual nnReal evalDiff(const nnReal in, const nnReal out) const = 0; // f'(in) virtual std::string name() const = 0; virtual ~Function() {} }; struct Linear : public Function { std::string name() const override { return "Linear";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(1./inps); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(1./inps); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { memcpy(out, in, N*sizeof(nnReal)); } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { memcpy(out, in, N*sizeof(nnReal)); } template <typename T> static T _eval(const T in) { return in; } template <typename T> static T _evalDiff(const T in, const T out) { return 1; } nnReal eval(const nnReal in) const override { return in; } nnReal inverse(const nnReal in) const override { return in; } nnReal evalDiff(const 
nnReal in, const nnReal out) const override { return 1; } }; struct Tanh : public Function { std::string name() const override { return "Tanh"; } Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(6./(inps + outs)); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(6./(inps + outs)); } template <typename T> static T _eval(const T in) { if(in > 0) { const T e2x = std::exp(-2*in); return (1-e2x)/(1+e2x); } else { const T e2x = std::exp( 2*in); return (e2x-1)/(1+e2x); } } template <typename T> static T _evalDiff(const T in, const T out) { return 1 - out*out; } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { assert(std::fabs(in)<1); return std::log((1+in)/(1-in)) / 2; } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct Sigm : public Function { std::string name() const override { return "Sigm";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(6./(inps + outs)); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(6./(inps + outs)); } template <typename T> static T _eval(const T in) { if(in > 0) return 1/(1+Utilities::safeExp(-in)); else { const T ex = Utilities::safeExp(in); return ex/(1+ex); } } template <typename T> static T _inv(const T in) { assert(in > 0 && in < 1); return - std::log(1/in - 1); } template <typename T> static T _evalDiff(const T in) { const T expx = Utilities::safeExp(in); return expx / std::pow(expx+1, 2); } template <typename T> static T _evalDiff(const T in, const T out) { return out*(1-out); } static void _eval(const nnReal*const in, nnReal*const out, 
const Uint N) { for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { return _inv(in); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct HardSign : public Function { std::string name() const override { return "HardSign";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(6./(inps + outs)); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(6./(inps + outs)); } template <typename T> static T _eval(const T in) { return in/std::sqrt(1+in*in); } template <typename T> static T _evalDiff(const T in, const T out) { const T denom = std::sqrt(1+in*in); return 1/(denom*denom*denom); } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0; i<N; ++i) out[i] = in[i]/std::sqrt(1+in[i]*in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { assert(in > 0 && in < 1); return in/std::sqrt(1 -in*in); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct HardSigmoid : public Function { std::string name() const override { return "HardSigmoid";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(6./(inps + outs)); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(6./(inps + outs)); } template <typename T> static T _eval(const T x) { return 0.5 * (1 + x/std::sqrt(1+x*x)); } template <typename T> static T _evalDiff(const T x, const T y) { const T denom = 
std::sqrt(1+x*x); return 0.5/(denom*denom*denom); } template <typename T> static T _evalDiff(const T x) { const T denom = std::sqrt(1+x*x); return 0.5/(denom*denom*denom); } template <typename T> static T _inv(const T y) { assert(y > 0 && y < 1); const Real map = 2 * y - 1; return map/std::sqrt(1 -map*map); } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0; i<N; ++i) out[i] = _eval(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal y) const override { return _inv(y); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct SoftSign : public Function { std::string name() const override { return "SoftSign";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(6.0/(inps + outs)); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(6.0/(inps + outs)); } template <typename T> static T _eval(const T in) { return in/(1 + std::fabs(in)); } template <typename T> static T _evalDiff(const T in, const T out) { const T denom = 1 + std::fabs(in); return 1/(denom*denom); } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0;i<N; ++i) out[i] = _eval(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { assert(in > 0 && in < 1); return in / (1 - std::fabs(in)); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct SoftRBF : public Function { std::string name() const override { return "SoftRBF";} Real initFactor(const 
Uint inps, const Uint outs) const override { return std::sqrt(6.0/(inps + outs)); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(6.0/(inps + outs)); } template <typename T> static T _eval(const T in) { return 1/(1 + in * in); } template <typename T> static T _evalDiff(const T in, const T out) { const T denom = 1 + in * in; return - 2 * in / (denom * denom); } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0;i<N; ++i) out[i] = _eval(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { die("Not supported"); return in / (1 - std::fabs(in)); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct Relu : public Function { std::string name() const override { return "Relu";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(2./inps); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(2./inps); } template <typename T> static T _eval(const T in) { return in>0 ? in : 0; } template <typename T> static T _evalDiff(const T in, const T out) { return in>0 ? 1 : 0; } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0;i<N; ++i) out[i] = in[i]>0 ? 
in[i] : 0; } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { assert(in>=0); return in; } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct LRelu : public Function { std::string name() const override { return "LRelu";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(1.0/inps); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(1.0/inps); } template <typename T> static T _eval(const T in) { return in>0 ? in : PRELU_FAC*in; } template <typename T> static T _evalDiff(const T in, const T out) { return in>0 ? 1 : PRELU_FAC; } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0;i<N; ++i) out[i] = in[i]>0 ? in[i] : PRELU_FAC*in[i]; } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { if(in >= 0) return in; else return in / PRELU_FAC; } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct ExpPlus : public Function { std::string name() const override { return "ExpPlus";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(2./inps); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(2./inps); } template <typename T> static T _inv(const T in) { return std::log(Utilities::safeExp(in) - 1); } // Used here, std::exp is trigger happy with nans, therefore we clip it // between exp(-32) and exp(16). 
template <typename T> static T _eval(const T in) { return std::log(1 + Utilities::safeExp(in)); } template <typename T> static T _evalDiff(const T in) { return 1/(1 + Utilities::safeExp(-in)); } template <typename T> static T _evalDiff(const T in, const T out) { return 1/(1 + Utilities::safeExp(-in)); } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { return _inv(in); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct SoftPlus : public Function { std::string name() const override { return "SoftPlus";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(2./inps); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(2./inps); } template <typename T> static T _eval(const T in) { return (in + std::sqrt(1+in*in)) / 2; } template <typename T> static T _evalDiff(const T in) { return (1 + in/std::sqrt(1+in*in)) / 2; } template <typename T> static T _evalDiff(const T in, const T out) { return (1 + in/std::sqrt(1+in*in)) / 2; } template <typename T> static T _inv(const T in) { assert(in > 0); return (in*in - (T)0.25)/in; } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { #pragma omp simd aligned(in,out : VEC_WIDTH) for (Uint i=0;i<N; ++i) out[i] = (in[i] + std::sqrt(1+in[i]*in[i])) / 2; } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { return _inv(in); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; struct Exp : 
public Function { std::string name() const override { return "Exp";} Real initFactor(const Uint inps, const Uint outs) const override { return std::sqrt(2./inps); } static Real _initFactor(const Uint inps, const Uint outs) { return std::sqrt(2./inps); } template <typename T> static T _inv(const T in) { return std::log(in); } template <typename T> static T _eval(const T in) { return Utilities::nnSafeExp(in); } template <typename T> static T _evalDiff(const T in) { return Utilities::nnSafeExp(in); } template <typename T> static T _evalDiff(const T in, const T out) { return out; } static void _eval(const nnReal*const in, nnReal*const out, const Uint N) { for(Uint i=0; i<N; ++i) out[i] = Utilities::nnSafeExp(in[i]); } void eval(const nnReal*const in, nnReal*const out, const Uint N) const override { return _eval(in, out, N); } nnReal eval(const nnReal in) const override { return _eval(in); } nnReal inverse(const nnReal in) const override { assert(in > 0); return std::log(in); } nnReal evalDiff(const nnReal in, const nnReal out) const override { return _evalDiff(in, out); } }; inline std::unique_ptr<Function> makeFunction(const std::string name, const bool bOutput=false) { if (bOutput || name == "Linear") return std::make_unique<Linear>(); else if (name == "Tanh") return std::make_unique<Tanh>(); else if (name == "Sigm") return std::make_unique<Sigm>(); else if (name == "HardSign") return std::make_unique<HardSign>(); else if (name == "SoftSign") return std::make_unique<SoftSign>(); else if (name == "Relu") return std::make_unique<Relu>(); else if (name == "LRelu") return std::make_unique<LRelu>(); else if (name == "ExpPlus") return std::make_unique<ExpPlus>(); else if (name == "SoftPlus") return std::make_unique<SoftPlus>(); else if (name == "Exp") return std::make_unique<Exp>(); else die("Activation function not recognized"); return std::make_unique<Linear>(); } } // end namespace smarties #endif // smarties_Quadratic_term_h
sw-post.c
/* * In this module, we are given the results of a full SW run, * and we compute two things: * * 1. The probability the location produced the read (over all possible alignments). * Currently, we only sum over all alignments respecting the current gaps. * As a result, this is useless to do in letter space, where the mapper * cannot distinguish between errors and SNPs. * * 2. For each output letter, the probability that it is correct. * Only for color space. In letter space, this is given by the base quality value. * * Both are computed as scores. */ #include <assert.h> #include <ctype.h> #include <errno.h> #include <math.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <unistd.h> #include <zlib.h> #include <limits.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/time.h> #include "../common/fasta.h" #include "../common/util.h" #include "../common/sw-post.h" #include "../common/sw-full-common.h" #include "../common/time_counter.h" static int initialized; static double pr_snp; static double pr_xover; static double pr_del_open; static double pr_del_extend; static double pr_ins_open; static double pr_ins_extend; static bool use_read_qvs; static bool use_sanger_qvs; static int default_qual; // if no qvs, use this instead static int qual_vector_offset; // i.e. is there a useless qv for the initial base in cs? 
static int qual_delta; // how much to subtract from chars to get the int static int init_bp; static int len; //static double neglogsixteenth; //static double neglogfourth; typedef struct column{ double forwards[16]; //we'll misuse this for the viterbi double backwards[16]; double forwscale; //adding numerical stability double backscale; //adding numerical stability int ncols; int nlets; int letssize; int colssize; int* lets; int* cols; double* letserrrate; double* colserrrate; char backpointer[16]; //previous state for viterbi double posterior[4]; int max_posterior; int base_call; } states; static struct column * columns; static int max_len; static uint64_t cells, invocs; static time_counter tc; static int check; #pragma omp threadprivate(initialized,\ pr_snp,pr_xover,pr_del_open,pr_del_extend,pr_ins_open,pr_ins_extend,\ use_read_qvs,use_sanger_qvs,default_qual,qual_vector_offset,qual_delta,\ init_bp,len,columns,max_len,\ tc,cells,invocs,check) /********************************************************************************* * * BEGIN Forward-backward code * *********************************************************************************/ #define left(i) ( ((i) >> 2) & 3) #define right(i) ( (i) & 3) #define MIN2(i,j) ( ((i)< (j))? (i):(j)) /* In order to understand any of the code below, you need to understand color-space; Specifically that LETTER ^ LETTER = COLOR : T (00) ^ C (10) = 2 (10), etc. And that LETTER ^ COLOR = NEXTLETTER: T (00) ^ 3 (11) = A (11). */ /* compute prior probability of the node given the emissions. 
letters are thought to be at the left "side" of the pair emitted by the node */
/* Negative-log emission probability of state j in column i. */
double nodePrior(states* allstates, int i, int j)
{
  //i is state, j is node in the state
  double val = 0;
  double errrate;
  int let, col, k;
  /* Letter emissions: compare the state's current letter to the reference. */
  for (k = 0; k < allstates[i].nlets; k++) {
    let = allstates[i].lets[k];
    errrate = allstates[i].letserrrate[k];
    if (right(j) == let) {
      val = val - log(1-errrate);
    } else {
      val = val - log(errrate/3.0);
    }
  }
  //fprintf(stderr, "nodeprior: %g", val);
  /* Colour emissions: prev^curr letter must reproduce the read colour. */
  for (k = 0; k < allstates[i].ncols; k++) {
    col = allstates[i].cols[k];
    errrate = allstates[i].colserrrate[k];
    if ((left(j) ^ right(j)) == col) {
      val = val - log(1-errrate);
    } else {
      val = val - log(errrate/3.0);
    }
    //fprintf(stderr, " %g\n", val);
  }
  return val;
}

/* Little helper for debugging */
void printStates(states* allstates, int stateslen, FILE* stream)
{
  int i,j,k;
  fprintf(stream, "\nCONTIG %d", stateslen);
  for (i=0; i< stateslen; i++) {
    fprintf(stream, "\nCOLORS[%d] ",i);
    for (k = 0; k < allstates[i].ncols; k++) {
      fprintf(stream, "%d (%g)",allstates[i].cols[k], allstates[i].colserrrate[k]);
    }
  }
  for (i=0; i< stateslen; i++) {
    fprintf(stream, "\nFORWARDSS[%d] ",i);
    for (j=0; j< 16; j++) {
      fprintf(stream, "%.5g ",allstates[i].forwards[j] + allstates[i].forwscale);
    }
  }
  for (i=0; i< stateslen; i++) {
    fprintf(stream, "\nBACKWARDSS[%d] ",i);
    for (j=0; j< 16; j++) {
      fprintf(stream, "%.5g ",allstates[i].backwards[j] + allstates[i].backscale);
    }
  }
  for (i=0; i< stateslen; i++) {
    fprintf(stream, "\nLETS[%d] ",i);
    for (k = 0; k < allstates[i].nlets; k++) {
      fprintf(stream, "%d ",allstates[i].lets[k]);
    }
    fprintf(stream, "%c",base_to_char(allstates[i].max_posterior, LETTER_SPACE));
    fprintf(stream, " %.5g %.5g %.5g %.5g", allstates[i].posterior[0],allstates[i].posterior[1],allstates[i].posterior[2],allstates[i].posterior[3]);
  }
  fprintf(stream, "\n");
}

/*maximum posterior traceback */
/* Combine forward and backward scores into a per-column posterior letter
 * distribution (normalized by the total score norm_px) and record the argmax. */
void post_traceback (states* allstates, int stateslen, double norm_px)
{
  int i = 0, j, maxval;
  for (i = 0; i < stateslen; i++) {
    for (j=0; j< 4; j++)
      allstates[i].posterior[j] = 0;
    for (j = 0; j < 16; j++) {
      // fprintf(stderr, "%g %g %g\n",allstates[i].forwards[j], allstates[i].backwards[j], norm_px);
      allstates[i].posterior[right(j)] += exp(-1 * (allstates[i].forwards[j] + allstates[i].backwards[j] + allstates[i].forwscale + allstates[i].backscale - norm_px));
      // fprintf(stderr, "distrib[%d,%d] = %g\n", i,j, exp(-1 * (allstates[i].forwards[j] + allstates[i].backwards[j] + allstates[i].forwscale + allstates[i].backscale - norm_px)));
    }
    maxval = 0;
    for (j=1; j< 4; j++) {
      // fprintf(stderr, "let_distrib[%d,%d] = %g\n", i,j, distrib[j]);
      if (allstates[i].posterior[j] >allstates[i].posterior[maxval])
        maxval = j;
    }
    // fprintf (stderr, "\n");
    //if (allstates[i].posterior[maxval] > confrate) {
    allstates[i].max_posterior = maxval;
    //}
    //else {
    //  allstates[i].max_posterior = BASE_N;
    //}
  }
}

/*viterbi traceback */
/*
char* vit_traceback (states* allstates, int stateslen)
{
  char* result = (char*) calloc (stateslen + 1, 1);
  int i,j;
  int minval, prev;
  assert(0); // not changed to right letter emission
  for (i = stateslen -1; i >= 0; i--) {
    minval = 0;
    for (j = 0; j< 16; j++) {
      if (allstates[i].forwards[j] < allstates[i].forwards[minval]) {
        minval = j;
      }
    }
    prev = allstates[i].backpointer[minval];
    if (i && (left(minval) != right (prev))) {
      fprintf (stderr, "BACKTRACE error %d %d %d\n", i, minval, prev);
      exit(2);
    }
    result[i] = letmap[left(minval)];
  }
  return result;
}

void viterbi (states* allstates, int stateslen)
{
  int i,j,k,let,col;
  int minback;
  double valback;
  double val;
  assert(0); // not changed to right letter emission
  i = 0;
  for (j = 0; j < 16; j++) {
    allstates[i].forwards[j] = nodePrior(allstates,i,j);
  }
  for (i=1; i < stateslen; i++) {
    for (j = 0; j < 16; j++) {
      allstates[i].forwards[j] = nodePrior(allstates,i,j);
      minback = left(j);
      for (k = 1; k < 16; k++) {
        if (left(j) == right(k)) {
          if (allstates[i-1].forwards[k] < allstates[i-1].forwards[minback]) {
            minback = k;
          }
        }
      }
      valback = allstates[i-1].forwards[minback];
      allstates[i].forwards[j] += valback;
      allstates[i].backpointer[j] = minback;
    }
  }
}
*/

/* Backward pass of the HMM in negative-log space. Each column is rescaled by
 * its minimum (backscale), which accumulates right-to-left, to avoid underflow. */
double do_backwards (states* allstates, int stateslen)
{
  int i,j,k; //,let,col;
  double val;
  i = stateslen-1;
  allstates[i].backscale = 999999999;
  for (j = 0; j < 16; j++) {
    allstates[i].backwards[j] = 0; // matei change: bug fix
    allstates[i].backscale = MIN2 (allstates[i].backscale, allstates[i].backwards[j]);
  }
  for (j = 0; j < 16; j++) {
    allstates[i].backwards[j] -= allstates[i].backscale;
  }
  for (i = stateslen-2; i >=0; i--) {
    allstates[i].backscale = 999999999;
    memset(allstates[i].backwards, 0, 16 * sizeof(allstates[i].backwards[0])); // matei: bug fix
    for (j = 0; j < 16; j++) {
      /* Sum over successor states whose left letter matches our right letter. */
      for (k = 0; k < 16; k++) {
        if (right(j) == left(k)) {
          val = nodePrior(allstates,i+1,k);
          allstates[i].backwards[j] += exp(-1*(val + allstates[i+1].backwards[k]));
        }
      }
      // fprintf(stdout, "bw was [%d, %d] = %g\n", i, j, allstates[i].backwards[j]);
      allstates[i].backwards[j] = -log(allstates[i].backwards[j]); // + neglogfourth;
      allstates[i].backscale = MIN2 (allstates[i].backscale, allstates[i].backwards[j]);
    }
    for (j = 0; j < 16; j++) {
      allstates[i].backwards[j] -= allstates[i].backscale;
      // fprintf(stdout, "bw is [%d, %d] = %g\n", i, j, allstates[i].backwards[j]);
    }
    allstates[i].backscale += allstates[i+1].backscale;
  }
  val = 0;
  i = 0;
  /* Only states consistent with the known initial base contribute. */
  for (j = 0; j < 16; j++) {
    if (left(j) == init_bp) { // matei change: second letter emission
      val += exp(-1*(allstates[i].backwards[j] + nodePrior(allstates,i,j))); // + neglogfourth));
    }
  }
  return -log(val) + allstates[0].backscale;
}

/* Forward pass; mirror image of do_backwards. Column 0 is constrained to the
 * known initial base; per-column rescaling accumulates left-to-right. */
double do_forwards (states* allstates, int stateslen)
{
  int i,j,k; //,let,col;
  double val;
  i = 0;
  j = 0;
  allstates[i].forwscale = 999999999;
  for (j = 0; j < 16; j++) {
    if (left(j) == init_bp) { // matei change: second letter emission
      allstates[i].forwards[j] = nodePrior(allstates,i,j); // + neglogfourth;
      allstates[i].forwscale = MIN2 (allstates[i].forwscale, allstates[i].forwards[j]);
    } else {
      allstates[i].forwards[j] = HUGE_VAL;
    }
  }
  for (j = 0; j < 16; j++) {
    allstates[i].forwards[j] -= allstates[i].forwscale;
  }
  for (i=1; i < stateslen; i++) {
    allstates[i].forwscale = 999999999;
    memset(allstates[i].forwards, 0, 16 * sizeof(allstates[i].forwards[0])); // matei: bug fix
    for (j = 0; j < 16; j++) {
      val = nodePrior(allstates,i,j);
      for (k = 0; k < 16; k++) {
        if (left(j) == right(k)) {
          allstates[i].forwards[j] += exp(-1*(allstates[i-1].forwards[k]));
        }
      }
      allstates[i].forwards[j] = val - log(allstates[i].forwards[j]); //+ neglogfourth;
      allstates[i].forwscale = MIN2 (allstates[i].forwscale, allstates[i].forwards[j]);
    }
    for (j = 0; j < 16; j++) {
      allstates[i].forwards[j] -= allstates[i].forwscale;
    }
    allstates[i].forwscale += allstates[i-1].forwscale;
  }
  val = 0;
  i = stateslen-1;
  for (j = 0; j < 16; j++) {
    val += exp(-1*(allstates[i].forwards[j])); // matei change: bug fix
  }
  return -log(val)+ allstates[i].forwscale;
}

/* Run both passes; the two total scores should agree up to FP precision. */
double forward_backward (states* allstates, int stateslen)
{
  double no1, no2;
  no1 = do_forwards(allstates, stateslen);
  no2 = do_backwards(allstates, stateslen);
#ifdef DEBUG_POST_SW
  fprintf (stderr, "SANITY CHECK: no1 == no2 %g %g\n", no1, no2);
#endif
  // don't really want a hard assert due to precision issues
  return no1;
}

/*********************************************************************************
 *
 * END Forward-backward code
 *
 *********************************************************************************/

/* Per-thread initialization: store model parameters and allocate column
 * storage for reads up to _max_len columns. Returns 1 on success. */
int post_sw_setup(int _max_len, double _pr_snp, double _pr_xover,
                  double _pr_del_open, double _pr_del_extend,
                  double _pr_ins_open, double _pr_ins_extend,
                  bool _use_read_qvs, bool _use_sanger_qvs,
                  int _qual_vector_offset, int _qual_delta, bool reset_stats)
{
  /* The colour-space algebra below relies on these XOR identities. */
  assert(0 == BASE_0);
  assert((BASE_A ^ BASE_C) == BASE_1);
  assert((BASE_A ^ BASE_G) == BASE_2);
  assert((BASE_A ^ BASE_T) == BASE_3);
  assert((BASE_C ^ BASE_G) == BASE_3);
  assert((BASE_C ^ BASE_T) == BASE_2);
  assert((BASE_G ^ BASE_T) == BASE_1);

  pr_snp = _pr_snp;
  pr_xover = _pr_xover;
pr_del_open = _pr_del_open;
  pr_del_extend = _pr_del_extend;
  pr_ins_open = _pr_ins_open;
  pr_ins_extend = _pr_ins_extend;
  qual_delta = _qual_delta;
  use_read_qvs = _use_read_qvs;
  use_sanger_qvs = _use_sanger_qvs;
  if (!use_read_qvs) {
    default_qual = qv_from_pr_err(pr_xover);
    //pr_xover = pr_err_from_qv(default_qual);
  } else {
    qual_vector_offset = _qual_vector_offset;
  }
  //neglogsixteenth = -log(1.0/16.0);
  //neglogfourth = -log(1.0/4.0);

  /* Pre-allocate one emission slot per column; each column holds at most
   * one letter and one colour. */
  max_len = _max_len;
  columns = (struct column *)xmalloc(max_len * sizeof(columns[0]));
  for (int i = 0; i < max_len; i++) {
    columns[i].lets = (int *)xmalloc(1 * sizeof(columns[i].lets[0]));
    columns[i].cols = (int *)xmalloc(1 * sizeof(columns[i].cols[0]));
    columns[i].letserrrate = (double *)xmalloc(1 * sizeof(columns[i].letserrrate[0]));
    columns[i].colserrrate = (double *)xmalloc(1 * sizeof(columns[i].colserrrate[0]));
  }
  if (reset_stats) {
    cells = invocs = 0;
    tc.type = DEF_FAST_TIME_COUNTER;
    tc.counter = 0;
  }
  initialized = 1;
  check = 0;
  return 1;
}

/* Release per-thread column storage allocated by post_sw_setup. */
int post_sw_cleanup()
{
  for (int i = 0; i < max_len; i++) {
    free(columns[i].lets);
    free(columns[i].cols);
    free(columns[i].letserrrate);
    free(columns[i].colserrrate);
  }
  free(columns);
  return 1;
}

/* Report accumulated statistics; any out-parameter may be NULL. */
int post_sw_stats(uint64_t * _invocs, uint64_t * _cells, double * _secs)
{
  if (_invocs != NULL)
    *_invocs = invocs;
  if (_cells != NULL)
    *_cells = cells;
  if (_secs != NULL)
    *_secs = time_counter_get_secs(&tc);
  return 1;
}

/*
 * Extract genome sequence, read, and qvs of interest.
 */
static void load_local_vectors(uint32_t * read, int _init_bp, char * qual, struct sw_full_results * sfrp)
{
  int start_run, col;
  int min_qv;
  int i, j;

  /* Fold the colours clipped before read_start into a single offset
   * (start_run) and track the minimum QV seen among them. */
  start_run = 0;
  min_qv = 10000;
  for (j = 0; j < sfrp->read_start; j++) {
    col = EXTRACT(read, j);
    if (col == BASE_N) {
      start_run = BASE_N;
      min_qv = 0;
      j = sfrp->read_start;
      break;
    }
    start_run ^= col;
    if (use_read_qvs)
      min_qv = MIN(min_qv, (int)qual[qual_vector_offset+j]);
  }

  /* Walk the alignment and build one HMM column per emitted read colour. */
  len = 0;
  for (i = 0; sfrp->dbalign[i] != 0; i++) {
    if (sfrp->qralign[i] != '-') { // ow, it's a deletion; nothing to do
      if (sfrp->dbalign[i] != '-') { // MATCH
        columns[len].nlets = 1;
        columns[len].lets[0] = fasta_get_initial_base(COLOUR_SPACE, &sfrp->dbalign[i]); // => BASE_A/C/G/T
        columns[len].letserrrate[0] = pr_snp;
      } else {
        columns[len].nlets = 0;
      }
      // MATCH or INSERTION
      columns[len].ncols = 1;
      col = EXTRACT(read, j);
      if ((len == 0 && start_run == BASE_N) || col == BASE_N) {
        /* Unknown colour: emit BASE_0 with a maximally uninformative rate. */
        //columns[len].ncols = 0; // no emission
        columns[len].cols[0] = BASE_0;
        columns[len].colserrrate[0] = .75;
      } else {
        columns[len].cols[0] = EXTRACT(read, j) ^ (len == 0? start_run : 0);
        if (use_read_qvs) {
          columns[len].colserrrate[0] = pr_err_from_qv((len == 0? MIN(min_qv, (int)qual[qual_vector_offset + j]) : (int)qual[qual_vector_offset + j]) - qual_delta);
          if (!use_sanger_qvs) { /* Solexa-style odds -> probability */
            columns[len].colserrrate[0] /= (1 + columns[len].colserrrate[0]);
          }
          if (columns[len].colserrrate[0] > .75)
            columns[len].colserrrate[0] = .75;
        } else {
          columns[len].colserrrate[0] = pr_xover;
        }
      }
      columns[len].base_call = char_to_base(sfrp->qralign[i]);
      assert(base_to_char(columns[len].base_call, LETTER_SPACE) == toupper(sfrp->qralign[i]));
      len++;
      j++;
    }
  }
  init_bp = _init_bp;

#ifdef DEBUG_POST_SW
  int _i;
  fprintf(stderr, "db: ");
  for (_i = 0; _i < len; _i++) {
    fprintf(stderr, " %c", columns[_i].nlets > 0 ? base_to_char(columns[_i].lets[0], LETTER_SPACE) : '-');
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "qr: %c", base_to_char(init_bp, LETTER_SPACE));
  for (_i = 0; _i < len; _i++) {
    fprintf(stderr, " %c ", (columns[_i].ncols > 0 ? base_to_char(columns[_i].cols[0], COLOUR_SPACE) : '-'));
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "qv: ");
  for (_i = 0; _i < len; _i++) {
    fprintf(stderr, "%3d ", qv_from_pr_err(columns[_i].colserrrate[0]));
  }
  fprintf(stderr, "\n");
#endif
}

/* Convert per-column posteriors into a SAM-style (33-based) quality string,
 * capped at QV 40, stored in sfrp->qual. */
static void get_base_qualities(struct sw_full_results * sfrp)
{
  int i, k;
  sfrp->qual = (char *)xmalloc((strlen(sfrp->qralign) + 1) * sizeof(sfrp->qual[0]));
  for (i = 0, k = 0; sfrp->qralign[i] != 0; i++) {
    if (sfrp->qralign[i] != '-') {
      int tmp = columns[k].base_call != BASE_N ? qv_from_pr_corr(columns[k].posterior[columns[k].base_call]) : 0;
      if (tmp > 40)
        tmp = 40;
      sfrp->qual[k] = 33 + tmp; // always 33+ in SAM
      k++;
    }
  }
  assert(k == len);
  sfrp->qual[k] = 0;
}

/* Convert the total HMM score into a probability and multiply in the gap
 * open/extend priors for every indel in the alignment. */
static double get_posterior(struct sw_full_results * sfrp, double total_score)
{
  int i;
  double res;
  res = exp(-total_score); // - len * neglogfourth));
  for (i = 0; sfrp->dbalign[i] != 0; i++) {
    if (sfrp->dbalign[i] == '-') {
      res *= pr_ins_extend;
      if (i == 0 || sfrp->dbalign[i-1] != '-') {
        res *= pr_ins_open;
      }
    } else if (sfrp->qralign[i] == '-') {
      res *= pr_del_extend;
      if (i == 0 || sfrp->qralign[i-1] != '-') {
        res *= pr_del_open;
      }
    }
  }
  return res;
}

/*
 * Main method, called after full SW.
 */
/* Run the full posterior computation for one alignment: build the HMM
 * columns, run forward-backward, derive per-base qualities (sfrp->qual)
 * and the alignment posterior (sfrp->posterior). */
void post_sw(uint32_t * read, int _init_bp, char * qual, struct sw_full_results * sfrp)
{
  double total_score;
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tc);
  invocs++;

  assert(sfrp != NULL);
  assert(sfrp->dbalign != NULL);
  if (!initialized)
    abort();

#ifdef DEBUG_POST_SW
  int _i, _j, _last_base, _new_base;
  /* NOTE(review): 'spaces' is used as padding via
   * spaces + strlen(spaces) - read_start - 1, so it must contain at least
   * read_start+1 blanks; confirm the literal was not truncated here. */
  char const * spaces = " ";
  fprintf(stderr, "Post SW\n");
  fprintf(stderr, "dbalign: %s%s\n", spaces + strlen(spaces) - sfrp->read_start - 1, sfrp->dbalign);
  fprintf(stderr, "qralign: %s%s (offset: %d)\n", spaces + strlen(spaces) - sfrp->read_start - 1, sfrp->qralign, sfrp->read_start);
  fprintf(stderr, "read cs: %c", base_to_char(_init_bp, LETTER_SPACE));
  for (_i = 0, _j = 0; _i < (int)sfrp->read_start + (int)strlen(sfrp->qralign); _i++) {
    if (_j < sfrp->read_start) {
      fprintf(stderr, "%c", base_to_char(EXTRACT(read, _j), COLOUR_SPACE));
      _j++;
    } else {
      if (sfrp->qralign[_i - sfrp->read_start] == '-') {
        fprintf(stderr, "-");
      } else {
        fprintf(stderr, "%c", base_to_char(EXTRACT(read, _j), COLOUR_SPACE));
        _j++;
      }
    }
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "read ls: ");
  _last_base = _init_bp;
  for (_i = 0, _j = 0; _i < (int)sfrp->read_start + (int)strlen(sfrp->qralign); _i++) {
    if (_j < sfrp->read_start) {
      _new_base = cstols(_last_base, EXTRACT(read, _j), false);
      fprintf(stderr, "%c", base_to_char(_new_base, LETTER_SPACE));
      _last_base = _new_base;
      _j++;
    } else {
      if (sfrp->qralign[_i - sfrp->read_start] == '-') {
        fprintf(stderr, "-");
      } else {
        _new_base = cstols(_last_base, EXTRACT(read, _j), false);
        fprintf(stderr, "%c", base_to_char(_new_base, LETTER_SPACE));
        _last_base = _new_base;
        _j++;
      }
    }
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "read qv: ");
  for (_i = 0, _j = 0; _i < (int)sfrp->read_start + (int)strlen(sfrp->qralign); _i++) {
    if (_j < sfrp->read_start) {
      fprintf(stderr, "%c", use_read_qvs? qual[_j] : qual_delta + default_qual);
      _j++;
    } else {
      if (sfrp->qralign[_i - sfrp->read_start] == '-') {
        fprintf(stderr, " ");
      } else {
        fprintf(stderr, "%c", use_read_qvs? qual[_j] : qual_delta + default_qual);
        _j++;
      }
    }
  }
  fprintf(stderr, "\n");
#endif

  /* Core pipeline: columns -> total score -> posteriors -> outputs. */
  load_local_vectors(read, _init_bp, qual, sfrp);
  total_score = forward_backward(columns, len);
  post_traceback(columns, len, total_score);
  get_base_qualities(sfrp);
  sfrp->posterior = get_posterior(sfrp, total_score);

#ifdef DEBUG_POST_SW
  fprintf(stderr, "don: ");
  for (_i = 0; _i < len; _i++) {
    fprintf(stderr, " %c", base_to_char(columns[_i].max_posterior, LETTER_SPACE));
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "bqv: ");
  for (_i = 0; _i < len; _i++) {
    int res = columns[_i].posterior[columns[_i].max_posterior] > 1 - .00000001? 80 : (int)(-10.0*(log(1 - columns[_i].posterior[columns[_i].max_posterior])/log(10.0)));
    fprintf(stderr, " %3d", res);
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "qralign: ");
  for (_i = 0, _j = 0; sfrp->qralign[_i] != 0; _i++) {
    if (sfrp->qralign[_i] != '-') {
      fprintf(stderr, " %c", sfrp->qralign[_i]);
    }
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "bqv: ");
  for (_i = 0, _j = 0; sfrp->qralign[_i] != 0; _i++) {
    if (sfrp->qralign[_i] != '-') {
      fprintf(stderr, "%3d", (int)(sfrp->qual[_j] - qual_delta));
      _j++;
    }
  }
  fprintf(stderr, "\n");
  printStates(columns, len, stderr);
#endif

  cells += 16*len;
  //after = rdtsc();
  //ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tc);
}
GB_unop__identity_uint8_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): change the Generator/ template instead; this instantiation is
// identity with a uint16_t -> uint8_t typecast.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint8_uint16)
// op(A') function:  GB (_unop_tran__identity_uint8_uint16)

// C type:   uint8_t
// A type:   uint16_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint16_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ;     \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (false here: A is uint16_t, C is uint8_t, so a real cast is needed)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = (uint8_t) Ax [p] for all p; Ab (if non-NULL) is the bitmap of A.
// Cx and Ax may be aliased.

GrB_Info GB (_unop_apply__identity_uint8_uint16)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // dead branch for this instantiation (the macro above is 0);
        // NOTE(review): if it were enabled, sizeof (uint16_t) is the A
        // element size, which only matches C when no typecast occurs.
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by GB_unop_transpose.c, driven by the macros
// defined above.

GrB_Info GB (_unop_tran__identity_uint8_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_whereami.c
/* Program omp_whereami reports the mask for each OMP thread, and works for nsec seconds (10). This allows one to inspect occupation through utilities like top (e.g. execute top, then hit the 1 key). Uses maskeraid utilities github.com/TACC/maskeraid map_to_cpuid(cpu_id): will set current thread to cpu_id omp_report_mask(): reports masks of the threads load_cpu_nsec(nsec): load the cpu for nsec (default 10) */ /* omp_whereami.c is a driver 1.) Get line arguments (optional): help or number of seconds for load 2.) Start OpenMP parallel region omp_report_mask() reports masks for each thread 3.) Set a work load on each thread 4.) Finish parallel region Kent Milfeld 12/16/15 Added cmd_line argument extraction. Kent Milfeld 2016/07/13 */ #include <stdio.h> #include <omp.h> #include "opts.h" void load_cpu_nsec(int nsec); void omp_report_mask(); int map_to_cpuid( int icore); int main(int argc, char *argv[]){ int nthrds, thrd, cpuid; //Thread info int nsec = 10; // Load, default time int ierr; // Error number // cmdln_get_nsec_or_help( &nsec, argc, argv); //optional, get nsec from cmd line Maskopts opts(argc,argv); #pragma omp parallel private(thrd,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); // cpuid = thrd; // set cpuid to thread number (thrd) // ierr = map_to_cpuid( cpuid ); // set your own affinity here omp_report_mask(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } }
mixed_fhn_mod_mitchell.c
#include <stdio.h>
#include "mixed_fhn_mod_mitchell.h"

// Mixed cellular model: modified FitzHugh-Nagumo (1961) for cells whose
// mapping value is 0, Mitchell-Shaeffer (2003) for all other cells.  The
// per-cell model choice comes through extra_data (the "mask" array).

// TODO: Maybe change this function
// Set number_of_ode_equations to the maximum 'NEQ' ?
GET_CELL_MODEL_DATA(init_cell_model_data) {

    // Reports the FHN defaults only (NEQ_1 / INITIAL_V_1); see TODO above.
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V_1;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ_1;

}

SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Print the banner only once, on the first cell initialised.
    static bool first_call = true;

    if(first_call) {
        print_to_stdout_and_file("Using mixed version of modified FHN 1961 + Mitchell-Shaeffer 2003 CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    } else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Based on the mapping initialize the initial conditions from the correct celular model
    if (mapping[sv_id] == 0) {
        // FHN state: [0] = Vm (millivolt), [1] = v (dimensionless)
        sv[0] = 0.000000f; //Vm millivolt
        sv[1] = 0.000000f; //v dimensionless
    } else {
        // Mitchell-Shaeffer state: [0] = Vm (millivolt), [1] = h (dimensionless)
        sv[0] = 0.00000820413566106744f; //Vm millivolt
        sv[1] = 0.8789655121804799f;     //h dimensionless
    }

}

SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    } else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    // Each cell advances num_steps explicit-Euler steps independently.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        // sv_id is the cell's index in the global state vector; it differs
        // from i when cells_to_solve selects a subset.
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j) {
            // NOTE(review): the mask is indexed with i, but the state vector
            // with sv_id.  When cells_to_solve is non-NULL these disagree --
            // confirm whether mapping[sv_id] was intended here.
            if (mapping[i] == 0)
                solve_model_ode_cpu_fhn(dt, sv + (sv_id * NEQ_1), stim_currents[i]);
            else
                solve_model_ode_cpu_mitchell(dt, sv + (sv_id * NEQ_2), stim_currents[i]);
        }
    }
}

// One forward-Euler step of the modified FHN model: sv <- sv + dt * f(sv).
void solve_model_ode_cpu_fhn (real dt, real *sv, real stim_current) {

    real rY[NEQ_1], rDY[NEQ_1];

    for(int i = 0; i < NEQ_1; i++)
        rY[i] = sv[i];

    RHS_cpu_fhn(rY, rDY, stim_current);

    for(int i = 0; i < NEQ_1; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

// Right-hand side of the modified FHN model (cubic excitation plus linear
// recovery); writes du/dt and dv/dt into rDY_.
void RHS_cpu_fhn (const real *sv, real *rDY_, real stim_current) {

    //State variables
    const real u = sv[0];
    const real v = sv[1];

    // Constants
    const real a = 0.2f;
    const real b = 0.5f;
    const real k = 36.0;
    const real epsilon  = 0.000150;

    // Rates
    rDY_[0] = k*(u*(1.0f - u)*(u - a) - u*v) + stim_current;
    rDY_[1] = k*epsilon*(b*u - v);
}

// One forward-Euler step of the Mitchell-Shaeffer model.
void solve_model_ode_cpu_mitchell (real dt, real *sv, real stim_current) {

    real rY[NEQ_2], rDY[NEQ_2];

    for(int i = 0; i < NEQ_2; i++)
        rY[i] = sv[i];

    RHS_cpu_mitchell(rY, rDY, stim_current);

    for(int i = 0; i < NEQ_2; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

// Right-hand side of the two-current Mitchell-Shaeffer model: an inward
// current gated by h and an outward leak, with a voltage-gated h dynamic.
// (pow() is assumed to come in via the model header -- verify <math.h>.)
void RHS_cpu_mitchell(const real *sv, real *rDY_, real stim_current) {

    //State variables
    const real V = sv[0];
    const real h = sv[1];

    // Constants
    const real tau_in = 0.3;
    const real tau_out = 6.0;
    const real V_gate = 0.13;
    const real tau_open = 120.0;
    const real tau_close = 150.0;

    // Algebraics
    real J_stim = stim_current;
    real J_in = ( h*( pow(V, 2.00000)*(1.00000 - V)))/tau_in;
    real J_out = - (V/tau_out);

    // Rates
    rDY_[0] = J_out + J_in + J_stim;
    rDY_[1] = (V < V_gate ? (1.00000 - h)/tau_open : - h/tau_close);
}
Par-20-ParallelForCallFuncParallelFor.c
/* Triples every element of A[0..l-1] in a worksharing loop and returns the
 * element count unchanged. */
int foo(int *A, int l) {
#pragma omp parallel for
  for (int idx = 0; idx < l; ++idx) {
    A[idx] *= 3;
  }
  return l;
}

/* Driver: a team shares one worksharing loop, and every iteration calls
 * foo(), which opens its own (nested) parallel region over the same array. */
int main(int argc, char **argv) {
  int a[4] = {1, 2, 3, 4};
  int b[4] = {0, 0, 0, 0};
#pragma omp parallel
  {
#pragma omp for
    for (int k = 0; k < 4; ++k) {
      b[k] = foo(a, 4);
    }
  }
  return 0;
}
GB_binop__rdiv_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): edit the Generator/ template, not this instantiation.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rdiv_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__rdiv_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__rdiv_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__rdiv_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rdiv_uint16)
// A*D function (colscale):         GB (_AxD__rdiv_uint16)
// D*A function (rowscale):         GB (_DxB__rdiv_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__rdiv_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__rdiv_uint16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rdiv_uint16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rdiv_uint16)
// C=scalar+B                       GB (_bind1st__rdiv_uint16)
// C=scalar+B'                      GB (_bind1st_tran__rdiv_uint16)
// C=A+scalar                       GB (_bind2nd__rdiv_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__rdiv_uint16)

// C type:     uint16_t
// A type:     uint16_t
// A pattern?  0
// B,b type:   uint16_t
// B pattern?  0

// BinaryOp:   cij = GB_IDIV_UNSIGNED (bij, aij, 16)
// (rdiv: z = y/x with the operands reversed relative to div)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (note the swapped operands: rdiv(x,y) = y/x)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_UNSIGNED (y, x, 16) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_UINT16 || GxB_NO_RDIV_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; loop supplied by the template.
void GB (_Cdense_ewise3_accum__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are the fill values used only by eWiseUnion
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rdiv_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x for all entries present in Bb.

GrB_Info GB (_bind1st__rdiv_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (bij, x, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = rdiv (Ax [p], y) = y / Ax [p] for all entries present in Ab.

GrB_Info GB (_bind2nd__rdiv_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (y, aij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint16_t aij = GBX (Ax, pA, false) ;                \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 16) ;           \
}

GrB_Info GB (_bind1st_tran__rdiv_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (as generated; value is unchanged here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint16_t aij = GBX (Ax, pA, false) ;                \
    Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 16) ;           \
}

GrB_Info GB (_bind2nd_tran__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
task_memory.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7

// OMPT test: exercises ompt_get_task_memory() for implicit tasks (expected to
// report no memory) and for explicit tasks with firstprivate data (expected
// to report a non-empty block).  Output is matched by the CHECK lines below.

#define USE_PRIVATE_TOOL 1
#include "callback.h"
#include <omp.h>

int main() {
  // NOTE(review): x is deliberately left uninitialized; the tasks only need
  // its storage (firstprivate capture), not a defined value.
  int x;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
#pragma omp task
      { x++; }
#pragma omp task firstprivate(x)
      { x++; }
    }
  }
  return 0;
}

// Implicit-task begin/end: queries task memory (expected NULL/0/result=0)
// and logs it together with a fresh unique task id.
static void
on_ompt_callback_implicit_task(ompt_scope_endpoint_t endpoint,
                               ompt_data_t *parallel_data,
                               ompt_data_t *task_data, unsigned int team_size,
                               unsigned int thread_num, int flag) {
  void *addr = NULL;
  size_t size = 0;
  int result = ompt_get_task_memory(&addr, &size, 0);
  // NOTE(review): %lu for a size_t is only correct where unsigned long ==
  // size_t; %zu would be portable -- confirm against the other OMPT tests.
  switch (endpoint) {
  case ompt_scope_begin:
    task_data->value = ompt_get_unique_id();
    printf("ompt_event_implicit_task_begin: task_id=%" PRIu64 ", memory_addr=%p, memory_size=%lu, result=%d \n", task_data->value, addr, size, result);
    break;
  case ompt_scope_end:
    printf("ompt_event_implicit_task_end: task_id=%" PRIu64 ", memory_addr=%p, memory_size=%lu, result=%d \n", task_data->value, addr, size, result);
    break;
  }
}

// Explicit-task creation: only assigns and logs the new task id.
static void
on_ompt_callback_task_create(ompt_data_t *encountering_task_data,
                             const ompt_frame_t *encountering_task_frame,
                             ompt_data_t *new_task_data, int flags,
                             int has_dependences, const void *codeptr_ra) {
  if (flags & ompt_task_initial)
    return; // not interested in the initial task
  new_task_data->value = ompt_get_unique_id();
  // NOTE(review): addr/size are declared but unused here -- apparent
  // leftovers from the schedule callback below.
  void *addr = NULL;
  size_t size = 0;
  printf("ompt_event_task_create: task_id=%" PRIu64 "\n", new_task_data->value);
}

// Task switch: here ompt_get_task_memory() is expected to return the
// explicit task's allocated block (non-NULL addr, size > 0, result = 1).
static void on_ompt_callback_task_schedule(ompt_data_t *first_task_data,
                                           ompt_task_status_t prior_task_status,
                                           ompt_data_t *second_task_data) {
  void *addr = NULL;
  size_t size = 0;
  int result = ompt_get_task_memory(&addr, &size, 0);
  printf("ompt_event_task_schedule: task_id=%" PRIu64 ", memory_addr=%p, memory_size=%lu, result=%d\n", first_task_data->value, addr, size, result);
}

// Tool bootstrap: resolve the OMPT entry points and register callbacks.
int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback = (ompt_set_callback_t)lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t)lookup("ompt_get_unique_id");
  ompt_get_task_memory =
      (ompt_get_task_memory_t)lookup("ompt_get_task_memory");

  register_callback(ompt_callback_implicit_task);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  printf("0: NULL_POINTER=%p\n", (void *)NULL);
  return 1; // success
}

void ompt_finalize(ompt_data_t *tool_data) {}

// Entry point the OpenMP runtime calls to activate this tool.
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,
                                                            &ompt_finalize, 0};
  return &ompt_start_tool_result;
}

// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

// CHECK: ompt_event_implicit_task_begin: task_id=[[TASK_ID:[0-9]+]]
// CHECK-SAME: memory_addr=[[NULL]], memory_size=0, result=0

// CHECK: ompt_event_task_create: task_id=[[TASK_ID_0:[0-9]+]]
// CHECK-DAG: ompt_event_task_create: task_id=[[TASK_ID_1:[0-9]+]]

// Expects non-zero address, size, and result
// CHECK-DAG: ompt_event_task_schedule: task_id=[[TASK_ID_0]],
// memory_addr=0x{{[0-f]+}}, memory_size={{[1-9][0-9]*}}, result=1
// CHECK-DAG: ompt_event_task_schedule: task_id=[[TASK_ID_1]],
// memory_addr=0x{{[0-f]+}}, memory_size={{[1-9][0-9]*}}, result=1

// CHECK: ompt_event_implicit_task_end: task_id=[[TASK_ID]]
// CHECK-SAME: memory_addr=[[NULL]], memory_size=0, result=0
GB_unop__acosh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): edit the Generator/ template instead; this instantiation is
// complex acosh on single-precision complex values (no typecast).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__acosh_fc32_fc32
// op(A') function:  GB_unop_tran__acosh_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = cacoshf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex arc-hyperbolic-cosine from <complex.h>)
#define GB_OP(z, x) \
    z = cacoshf (x) ;

// casting (identity: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = cacoshf (z) ;         \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOSH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = cacoshf (Ax [p]) for all p; Ab (if non-NULL) is the bitmap of A.

GrB_Info GB_unop_apply__acosh_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // dead branch here (macro above is 0, acosh is not the identity)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cacoshf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cacoshf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by GB_unop_transpose.c, driven by the macros
// defined above.

GrB_Info GB_unop_tran__acosh_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ntlmv1_mschapv2_fmt_plug.c
/* * Previous files MSCHAPv2_fmt_plug.c and NETNTLM_fmt_plug.c now merged into * this one file, sharing functions. * * NETNTLM_fmt.c -- NTLM Challenge/Response * Written by JoMo-Kun <jmk at foofus.net> in 2007 * and placed in the public domain. * * This algorithm is designed for performing brute-force cracking of the NTLM * (version 1) challenge/response pairs exchanged during network-based * authentication attempts [1]. The captured challenge/response pairs from these * attempts should be stored using the L0phtCrack 2.0 LC format, specifically: * username:unused:unused:lm response:ntlm response:challenge. For example: * * CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1: * C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788 * * It should be noted that a NTLM authentication response is not same as a NTLM * password hash, which can be extracted using tools such as FgDump [2]. NTLM * responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). * * [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * * This version supports Extended Session Security. This is what * is used when the "LM" hash ends in 32 zeros: * * DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000: * abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4 * * MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2 * Written by JoMo-Kun <jmk at foofus.net> in 2010 * and placed in the public domain. 
* * Support for freeradius-wep-patch challenge/response format * added by Linus Lüssing in 2012 and is licensed under CC0/PD terms: * To the extent possible under law, Linus Lüssing has waived all copyright * and related or neighboring rights to this work. This work is published from: * Germany. * * * This algorithm is designed for performing brute-force cracking of the * MSCHAPv2 challenge/response sets exchanged during network-based * authentication attempts. The captured challenge/response set from these * attempts should be stored using the following format: * * USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE * USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE * DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE * :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE: * * For example: * User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E * domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc * * http://freeradius.org/rfc/rfc2759.txt * * Modified for performance and support for SSE2, NTLMv1 ESS, OMP and UTF-8, by * magnum 2010-2011 and 2013. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_MSCHAPv2_new; extern struct fmt_main fmt_NETNTLM_new; #elif FMT_REGISTERS_H john_register_one(&fmt_MSCHAPv2_new); john_register_one(&fmt_NETNTLM_new); #else #include <string.h> #include <openssl/des.h> #include "arch.h" #include "sse-intrinsics.h" #ifdef MMX_COEF #define NBKEYS (MMX_COEF * MD4_SSE_PARA) #else #ifdef _OPENMP #define OMP_SCALE 4 #include <omp.h> #endif #endif #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "memory.h" #include "sha.h" #include "md4.h" #include "md5.h" #include "unicode.h" #include "memdbg.h" extern volatile int bench_running; #ifndef uchar #define uchar unsigned char #endif #define MIN(a, b) (((a) > (b)) ? 
(b) : (a))

/* MSCHAPv2 format constants */
#define CHAP_FORMAT_LABEL         "MSCHAPv2"
#define CHAP_FORMAT_NAME          "C/R"
#define CHAP_USERNAME_LENGTH      256
#define CHAP_CHALLENGE_LENGTH     64
/* NOTE(review): unparenthesized macro body; currently only used in additive
   contexts so precedence is harmless, but keep it that way. */
#define CHAP_TOTAL_LENGTH         13 + CHAP_USERNAME_LENGTH + CHAP_CHALLENGE_LENGTH + CIPHERTEXT_LENGTH

/* NETNTLM (NTLMv1) format constants */
#define NTLM_FORMAT_LABEL         "netntlm"
#define NTLM_FORMAT_NAME          "NTLMv1 C/R"
#define NTLM_TOTAL_LENGTH         (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)

/* Shared constants for both formats */
#define ALGORITHM_NAME            "MD4 DES (ESS MD5) " MD4_ALGORITHM_NAME
#define BENCHMARK_COMMENT         ""
#define BENCHMARK_LENGTH          -1000
/* binary layout: 2-byte recovered third-DES-key prefix + 3 * 8-byte DES blocks */
#define FULL_BINARY_SIZE          (2 + 8 * 3)
#define BINARY_SIZE               (2 + 8)
#define BINARY_ALIGN              2
#define SALT_SIZE                 8
#define SALT_ALIGN                MEM_ALIGN_WORD
#define CIPHERTEXT_LENGTH         48

#ifdef MMX_COEF
#define PLAINTEXT_LENGTH          27
//#define SSE_OMP
#if defined (_OPENMP) && defined(SSE_OMP)
#define BLOCK_LOOPS               (2048 / NBKEYS)
#else
#define BLOCK_LOOPS               (1024 / NBKEYS)
#endif
#define MIN_KEYS_PER_CRYPT        (NBKEYS * BLOCK_LOOPS)
#define MAX_KEYS_PER_CRYPT        (NBKEYS * BLOCK_LOOPS)
/* Offset helpers into the interleaved SIMD key/output buffers */
#define GETPOS(i, index)          ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*16*MMX_COEF*4 )
#define GETOUTPOS(i, index)       ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*4*MMX_COEF*4 )
#else
#define PLAINTEXT_LENGTH          64
#define MIN_KEYS_PER_CRYPT        1
#define MAX_KEYS_PER_CRYPT        2048
#endif

/* Per-run state shared by both registered formats */
#ifdef MMX_COEF
static unsigned char *saved_key;               /* interleaved SIMD key buffer */
#else
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1]; /* per-index UCS-2 plaintexts */
static int (*saved_key_length);
#endif
static unsigned short (*crypt_key);            /* 16-bit partial hash per key */
static unsigned char *nthash;                  /* full MD4 (NT hash) per key */
static ARCH_WORD_32 *bitmap;                   /* 64K-bit filter over crypt_key */
static int cmps_per_crypt, use_bitmap;
static int valid_i, valid_j;                   /* cached 2 bytes of 3rd DES key */
static uchar *challenge;                       /* current 8-byte salt */
static int keys_prepared;
static struct fmt_main *my;

static char *chap_long_to_short(char *orig); /* used to canonicalize the MSCHAPv2 format */

/* Self-test vectors: {ciphertext, plaintext} or {"", plaintext, {login fields}} */
static struct fmt_tests chap_tests[] = {
	{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
	{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
	{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
	{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
#if PLAINTEXT_LENGTH >= 35
	{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
#endif
	{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
	/* Ettercap generated three test vectors */
	{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
	{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
	{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
	/* Single test vector from chapcrack's sample pcap file */
	{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
	{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
	{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
	{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
	{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
	{NULL}
};

/* NTLMv1 self-test vectors (ESS variants carry the client challenge in the LM field) */
static struct fmt_tests ntlm_tests[] = {
	{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
#ifndef MMX_COEF
	/* exceeds max length for SSE */
	{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
#endif
	{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
	{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
	{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
	{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
	{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
	{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
	{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
	{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
	{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
	{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
	{NULL}
};

/* Expand a 7-byte (56-bit) key into the 8-byte DES key layout (bit 0 of
   each byte is the parity position) and build the key schedule. */
static inline void setup_des_key(uchar key_56[], DES_key_schedule *ks)
{
	DES_cblock key;

	key[0] = key_56[0];
	key[1] = (key_56[0] << 7) | (key_56[1] >> 1);
	key[2] =
(key_56[1] << 6) | (key_56[2] >> 2);
	key[3] = (key_56[2] << 5) | (key_56[3] >> 3);
	key[4] = (key_56[3] << 4) | (key_56[4] >> 4);
	key[5] = (key_56[4] << 3) | (key_56[5] >> 5);
	key[6] = (key_56[5] << 2) | (key_56[6] >> 6);
	key[7] = (key_56[6] << 1);
	DES_set_key(&key, ks);
}

/* Validate the long ("$MSCHAPv2$authchal$response$peerchal$user") form.
   Returns 1 if the field lengths and hex digits all check out. */
static int chap_valid_long(char *ciphertext)
{
	char *pos, *pos2;

	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, "$MSCHAPv2$", 10)!=0) return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH) return 0;

	/* Validate Authenticator/Server Challenge Length */
	pos = &ciphertext[10];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
		return 0;

	/* Validate MSCHAPv2 Response Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
		return 0;

	/* Validate Peer/Client Challenge Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
		return 0;

	/* Validate Username Length */
	if (strlen(++pos2) > CHAP_USERNAME_LENGTH)
		return 0;

	return 1;
}

/* Validate the short canonical ("$MSCHAPv2$challenge$response$$") form. */
static int chap_valid_short(char *ciphertext)
{
	char *pos, *pos2;

	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, "$MSCHAPv2$", 10)!=0) return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH) return 0;

	/* Validate MSCHAPv2 Challenge Length */
	pos = &ciphertext[10];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 4)) )
		return 0;

	/* Validate MSCHAPv2 Response Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
		return 0;

	return 1;
}

/* Decode SALT_SIZE bytes of hex challenge (right after "$MSCHAPv2$"). */
static void chap_get_challenge(const char *ciphertext, unsigned char *binary_salt)
{
	int i;
	const char *pos = ciphertext + 10;

	for (i = 0; i < SALT_SIZE; i++)
		binary_salt[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
}

/* Either the ciphertext already contains the MSCHAPv2 Challenge (4 Bytes) or
   we are going to calculate it via:
   sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge
   (8 Bytes)|Username (<=256)|)

   NOTE, we now ONLY call this function with the short form. The long form gets
   converted into the short form in either prepare or split function. The short
   form is the canonical form (Change made July, 2014, JimF) */
static void *chap_get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;
	unsigned char digest[20];

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	/* This is just to silence scan-build. It will never happen. It is
	   unclear why only this format gave warnings, many others do
	   similar things. */
	if (!ciphertext)
		return ciphertext;

	memset(binary_salt, 0, SALT_SIZE);
	memset(digest, 0, 20);
	chap_get_challenge(ciphertext, binary_salt);
	return (void*)binary_salt;
}

/*
 * This function will convert long hashes into short ones (the short is now the
 * canonical format); converts
 *   $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu
 * into
 *   $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$
 *
 * This code was moved from get_salt().
 */
static char *chap_long_to_short(char *ciphertext) {
	static char Buf[CHAP_TOTAL_LENGTH+1]; // larger than we need, but not a big deal
	static SHA_CTX ctx;
	unsigned char tmp[16];
	unsigned char digest[20];
	char *pos = NULL;
	int i;

	SHA1_Init(&ctx);

	/* Peer Challenge */
	pos = ciphertext + 10 + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);

	/* Authenticator Challenge */
	pos = ciphertext + 10; /* Skip $MSCHAPv2$ */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);

	/* Username - Only the user name (as presented by the peer and excluding
	   any prepended domain name) is used as input to SHAUpdate() */
	pos = ciphertext + 10 + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */
	SHA1_Update(&ctx, pos, strlen(pos));
	SHA1_Final(digest, &ctx);

	// Ok, now we re-make our ciphertext buffer, into the short canonical form.
	strcpy(Buf, "$MSCHAPv2$");
	pos = Buf + 10;
	for (i = 0; i < SALT_SIZE; i++) {
		//binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
		pos[(i<<1)] = itoa16[digest[i]>>4];
		pos[(i<<1)+1] = itoa16[digest[i]&0xF];
	}
	memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2);
	pos[16+CIPHERTEXT_LENGTH+2] = '$';
	pos[16+CIPHERTEXT_LENGTH+3] = 0;
	//printf ("short=%s original=%s\n", Buf, ciphertext);
	return Buf;
}

/* Full validation: after the syntactic checks, verify the third 8-byte DES
   block of the response can actually be produced by some 2-byte key suffix
   (the NT hash's last 2 bytes). Caches the found pair in valid_i/valid_j
   since consecutive hashes often share it. */
static int chap_valid(char *ciphertext, struct fmt_main *pFmt)
{
	char *cp = NULL;

	if (chap_valid_short(ciphertext))
		cp = ciphertext + 10 + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		cp = ciphertext + 10 + CHAP_CHALLENGE_LENGTH / 2 + 1;

	if (cp) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = chap_get_salt(ciphertext);
		int i, j;

		/* skip the first two 8-byte response blocks (hex) */
		cp += 2 * 8 * 2;
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(cp[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(cp[i * 2 + 1])];
		}

		/* try the cached key pair first */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;

		/* otherwise brute-force all 2^16 possibilities */
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected MSCHAPv2 hash with " "invalid 3rd block\n");
#endif
	}
	return 0;
}

/* Build a long-form MSCHAPv2 ciphertext from raw login fields
   (fields 3/4/5 = authenticator challenge, response, peer challenge). */
static char *chap_prepare_long(char *split_fields[10])
{
	char *username, *cp;

	/* DOMAIN\USERNAME -or - USERNAME -- ignore DOMAIN */
	if ((username = strstr(split_fields[0], "\\")) == NULL)
		username = split_fields[0];
	else
		username++;

	cp = mem_alloc(1+8+1+strlen(split_fields[3])+1+strlen(split_fields[4])+ 1+strlen(split_fields[5])+1+strlen(username)+1);
	sprintf(cp, "$MSCHAPv2$%s$%s$%s$%s", split_fields[3], split_fields[4], split_fields[5], username);
	if
(chap_valid_long(cp)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}

/* Build a short-form MSCHAPv2 ciphertext from raw login fields. */
static char *chap_prepare_short(char *split_fields[10])
{
	char *cp;

	cp = mem_alloc(1+8+1+strlen(split_fields[3])+1+strlen(split_fields[4])+ 1+1+1);
	sprintf(cp, "$MSCHAPv2$%s$%s$$", split_fields[3], split_fields[4]);
	if (chap_valid_short(cp)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}

/* prepare(): normalize whatever the loader gives us into the short
   canonical "$MSCHAPv2$challenge$response$$" form where possible. */
static char *chap_prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	char *ret;

	if (!strncmp(split_fields[1], "$MSCHAPv2$", 10)) {
		// check for a short format that has any extra trash fields, and if so remove them.
		char *cp1, *cp2, *cp3;
		cp1 = split_fields[1];
		cp1 += 10;
		cp2 = strchr(cp1, '$');
		ret = NULL;
		if (cp2 && cp2-cp1 == CHAP_CHALLENGE_LENGTH/4) {
			++cp2;
			cp3 = strchr(cp2, '$');
			/* NOTE(review): the `||` below looks like it may have been
			   intended as `&&` (reject only when trailing junk exists);
			   behavior kept as-is pending confirmation upstream. */
			if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
				ret = str_alloc_copy(split_fields[1]);
				ret[(cp3-split_fields[1]) + 1] = '$';
				ret[(cp3-split_fields[1]) + 2] = 0;
				//printf ("Here is the cut item: %s\n", ret);
			}
		}
	}
	else if (split_fields[0] && split_fields[3] && split_fields[4] && split_fields[5] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/2 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
	         strlen(split_fields[5]) == CHAP_CHALLENGE_LENGTH/2)
		ret = chap_prepare_long(split_fields);
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/4 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
		ret = chap_prepare_short(split_fields);
	else
		ret = NULL;

	if (ret && chap_valid_long(ret))
		ret = chap_long_to_short(ret);
	else if (chap_valid_long(split_fields[1]))
		ret = chap_long_to_short(split_fields[1]);
	return ret ? ret : split_fields[1];
}

/* split(): lower-case the hex fields (not the magic tag or the username)
   and canonicalize a long form to short. */
static char *chap_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CHAP_TOTAL_LENGTH + 1];
	int i, j = 0;

	memset(out, 0, CHAP_TOTAL_LENGTH + 1);
	memcpy(out, ciphertext, strlen(ciphertext));
	/* convert hashes to lower-case - exclude $MSCHAPv2 and USERNAME */
	for (i = 10; i < CHAP_TOTAL_LENGTH + 1 && j < 3; i++) {
		if (out[i] >= 'A' && out[i] <= 'Z')
			out[i] |= 0x20;
		else if (out[i] == '$')
			j++;
	}

	if (chap_valid_long(out))
		return chap_long_to_short(out);
	return out;
}

/* NTLMv1 salt: plain 8-byte server challenge, or for Extended Session
   Security the first 8 bytes of MD5(server challenge || client challenge). */
static void *ntlm_get_salt(char *ciphertext)
{
	static uchar *binary_salt;
	int i;

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	if (ciphertext[25] == '$') {
		// Server challenge
		ciphertext += 9;
		for (i = 0; i < SALT_SIZE; ++i)
			binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	} else {
		uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
		MD5_CTX ctx;

		ciphertext += 9;
		// Extended Session Security,
		// Concatenate Server & Client challenges
		for (i = 0;i < 2 * SALT_SIZE; ++i)
			es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];

		// MD5 the concatenated challenges, result is our key
		MD5_Init(&ctx);
		MD5_Update(&ctx, es_salt, 16);
		MD5_Final((void*)k1, &ctx);
		memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
	}
	return (void*)binary_salt;
}

/* Validate "$NETNTLM$challenge$response"; like chap_valid(), also verifies
   the third DES block against a brute-forced 2-byte key suffix. */
static int ntlm_valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, "$NETNTLM$", 9)!=0) return 0;
	if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
	if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;

	for (pos = &ciphertext[9]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (*pos != '$') return 0;

	for (pos++; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) || (pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar
CP_to_Unicode[temp];
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		}
		else
		{
			temp2 |= (0x80 << 16); /* 0x80 = MD4 padding byte */
			*keybuf_word = temp2;
			len++;
			goto key_cleaning_enc;
		}
		len += 2;
		keybuf_word += MMX_COEF;
	}
	*keybuf_word = 0x80;

key_cleaning_enc:
	/* zero the rest of this lane's buffer (stops at first already-zero word) */
	keybuf_word += MMX_COEF;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += MMX_COEF;
	}
	/* store bit length (len << 4 == UCS-2 chars * 16) in the MD4 length word */
	((unsigned int*)saved_key)[14*MMX_COEF + (index&3) + (index>>2)*16*MMX_COEF] = len << 4;
#else
	saved_key_length[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH + 1, (uchar*)_key, strlen(_key)) << 1;
	if (saved_key_length[index] < 0)
		saved_key_length[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}

// UTF-8 to UCS-2, directly into vector key buffer
static void set_key_utf8(char *_key, int index)
{
#ifdef MMX_COEF
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			unsigned int extraBytesToRead;
			extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			/* intentional fallthrough: accumulate continuation bytes */
			switch (extraBytesToRead) {
#if NT_FULL_UNICODE
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
#endif
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
#if NT_FULL_UNICODE
		if (chl > UNI_MAX_BMP) {
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += MMX_COEF;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			/* encode as a UTF-16 surrogate pair */
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);;
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else
#endif
		if (*source && len < PLAINTEXT_LENGTH) {
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
#if NT_FULL_UNICODE
				case 3:
					++source;
					if (*source) {
						chl <<= 6;
						chl += *source;
					} else
						goto bailout;
#endif
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += MMX_COEF;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += MMX_COEF;
	}
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += MMX_COEF;
	}

bailout:
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += MMX_COEF;
	}
	((unsigned int*)saved_key)[14*MMX_COEF + (index&3) + (index>>2)*16*MMX_COEF] = len << 4;
#else
	saved_key_length[index] = utf8_to_utf16(saved_key[index], PLAINTEXT_LENGTH + 1, (uchar*)_key, strlen(_key)) << 1;
	if (saved_key_length[index] < 0)
		saved_key_length[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}

/* One-time setup: scale key counts for OpenMP, pick the set_key variant
   for the target encoding, and allocate the shared buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP) && !defined(MMX_COEF)
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	my = self;
	if (pers_opts.target_enc == UTF_8) {
		self->methods.set_key = set_key_utf8;
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
	} else {
		if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1)
			self->methods.set_key = set_key_CP;
	}
#if MMX_COEF
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * 64 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
	nthash = mem_calloc_tiny(sizeof(*nthash) * 16 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
#else
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	nthash = mem_calloc_tiny(sizeof(*nthash) * 16 * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#endif
	crypt_key = mem_calloc_tiny(sizeof(unsigned short) * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
	bitmap = mem_calloc_tiny(0x10000 / 8, MEM_ALIGN_CACHE);
	use_bitmap = 0; /* we did not use bitmap yet */
	cmps_per_crypt = 2; /* try bitmap */
}

// Get the key back from the key buffer, from UCS-2
static char *get_key(int index)
{
#ifdef MMX_COEF
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	static UTF16 key[PLAINTEXT_LENGTH + 1];
	unsigned int md4_size=0;
	unsigned int i=0;

	/* walk the interleaved buffer two UCS-2 chars per 32-bit word until the
	   0x80 padding marker */
	for(; md4_size < PLAINTEXT_LENGTH; i += MMX_COEF, md4_size++)
	{
		key[md4_size] = keybuf_word[i];
		key[md4_size+1] = keybuf_word[i] >> 16;
		if (key[md4_size] == 0x80 && key[md4_size+1] == 0) {
			key[md4_size] = 0;
			break;
		}
		++md4_size;
		if (key[md4_size] == 0x80 && ((keybuf_word[i+MMX_COEF]&0xFFFF) == 0 || md4_size == PLAINTEXT_LENGTH)) {
			key[md4_size] = 0;
			break;
		}
	}
	return (char*)utf16_to_enc(key);
#else
	return (char*)utf16_to_enc(saved_key[index]);
#endif
}

/* Decode the binary response; bytes [0..1] get the brute-forced 2-byte
   third-DES-key prefix, bytes [2..] the 24-byte response. */
static void *get_binary(char *ciphertext)
{
	static uchar *binary;
	static int warned = 0, loaded = 0;
	DES_cblock *challenge = my->methods.salt(ciphertext);
	int i, j;

	if (!binary) binary = mem_alloc_tiny(FULL_BINARY_SIZE, BINARY_ALIGN);

	if (!warned && !ldr_in_pot && !bench_running && ++loaded > 100) {
		warned = 1;
		fprintf(stderr, "%s: Note: slow loading. For short runs, try " "--format=%s-naive\ninstead. That version loads " "faster but runs slower.\n", my->params.label, my->params.label);
	}

	if (chap_valid_short(ciphertext))
		ciphertext += 10 + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		ciphertext += 10 + CHAP_CHALLENGE_LENGTH / 2 + 1;
	else /* ntlmv1 */
		ciphertext = strrchr(ciphertext, '$') + 1;

	for (i = 0; i < FULL_BINARY_SIZE - 2; i++) {
		binary[2 + i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
		binary[2 + i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}

	{
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;

		/* cached pair first, then exhaustive 2^16 search (same as valid()) */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
			binary[0] = valid_i;
			binary[1] = valid_j;
			goto out;
		}

		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
				binary[0] = i;
				binary[1] = j;
				goto out;
			}
		}
		fprintf(stderr, "Bug: %s hash with invalid 3rd block, should " "have been rejected in valid()\n", my->params.label);
		binary[0] = binary[1] = 0x55;
	}

out:
	return binary;
}

/* crypt_all(): compute the MD4 (NT hash) of every queued key, keep a 16-bit
   partial per key in crypt_key[] and optionally a presence bitmap; the DES
   work is deferred to cmp_one()/cmp_exact(). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;

	if (!keys_prepared) {
		int i = 0;

		if (use_bitmap) {
#if MAX_KEYS_PER_CRYPT >= 200
//#warning Notice: Using memset
			memset(bitmap, 0, 0x10000 / 8);
#else
//#warning Notice: Not using memset
#ifdef MMX_COEF
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++)
#else
			for (i = 0; i < count; i++)
#endif
			{
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] = 0;
			}
#endif
		}

		/* only pay for the bitmap when cmp_all is called often enough */
		use_bitmap = cmps_per_crypt >= 2;
		cmps_per_crypt = 0;

#ifdef MMX_COEF
#if (BLOCK_LOOPS > 1)
#if defined(_OPENMP) && defined(SSE_OMP)
#pragma omp parallel for
#endif
		for (i = 0; i < BLOCK_LOOPS; i++)
			SSEmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN);
#else
		SSEmd4body(saved_key, (unsigned int*)nthash, 1);
#endif
		if (use_bitmap)
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				unsigned int value;

				/* top 16 bits of MD4 word 3 = bytes 14..15 of the NT hash */
				value = *(ARCH_WORD_32*) &nthash[GETOUTPOS(12, i)] >> 16;
				crypt_key[i] = value;
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		else
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				crypt_key[i] = *(ARCH_WORD_32*) &nthash[GETOUTPOS(12, i)] >> 16;
			}
#else
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++)
#endif
		{
			MD4_CTX ctx;

			MD4_Init( &ctx );
			MD4_Update(&ctx, saved_key[i], saved_key_length[i]);
			MD4_Final((uchar*)&nthash[i * 16], &ctx);
			/* last 2 bytes of the NT hash, as one 16-bit word */
			crypt_key[i] = ((unsigned short*)&nthash[i * 16])[7];
			if (use_bitmap) {
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		}
#endif
		keys_prepared = 1;
	}
	return count;
}

/* Confirm a 16-bit partial match by DES-encrypting the challenge with the
   first 7 bytes of this key's NT hash and comparing the first response block. */
static int cmp_one(void *binary, int index)
{
	if (crypt_key[index] == *(unsigned short*)binary) {
		DES_key_schedule ks;
		DES_cblock computed_binary;
		unsigned int key[2];
#ifdef MMX_COEF
		int i;

		for (i = 0; i < 2; i++)
			key[i] = *(ARCH_WORD_32*) &nthash[GETOUTPOS(4 * i, index)];
#else
		memcpy(key, &nthash[index * 16], 8);
#endif
		setup_des_key((unsigned char*)key, &ks);
		DES_ecb_encrypt((DES_cblock*)challenge, &computed_binary, &ks, DES_ENCRYPT);

		return !memcmp(((char*)binary) + 2, computed_binary, 8);
	}
	return 0;
}

/* Scan all computed keys for a 16-bit partial match; the bitmap (when in
   use) rejects most misses in O(1). */
static int cmp_all(void *binary, int count)
{
	unsigned int value = *(unsigned short*)binary;
	int index;

	cmps_per_crypt++;

	if (use_bitmap && !(bitmap[value >> 5] & (1U << (value & 0x1f))))
		goto out;

#ifdef MMX_COEF
	/* Let's give the optimizer a hint! */
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index += 2)
#else
	for (index = 0; index < count; index += 2)
#endif
	{
		unsigned int a = crypt_key[index];
		unsigned int b = crypt_key[index + 1];

#if 0
		if (((a | b) & value) != value)
			continue;
#endif
		if (a == value || b == value)
			goto thorough;
	}
	goto out;

thorough:
#ifdef MMX_COEF
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index++)
#else
	for (; index < count; index++)
#endif
	{
		if (crypt_key[index] == value && cmp_one(binary, index))
			return 1;
	}

out:
	return 0;
}

/* Full check: rebuild the complete 24-byte response from the NT hash
   (three DES encryptions of the challenge) and compare with the source. */
static int cmp_exact(char *source, int index)
{
	DES_key_schedule ks;
	uchar binary[24];
	unsigned char key[21];
	char *cp;
	int i;

#ifdef MMX_COEF
	for (i = 0; i < 4; i++)
		((ARCH_WORD_32*)key)[i] = *(ARCH_WORD_32*) &nthash[GETOUTPOS(4 * i, index)];
#else
	memcpy(key, &nthash[index * 16], 16);
#endif
	/* Hash is NULL padded to 21-bytes */
	memset(&key[16], 0, 5);

	/* Split into three 7-byte segments for use as DES keys
	   Use each key to DES encrypt challenge
	   Concatenate output to for 24-byte NTLM response */
	setup_des_key(key, &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary, &ks, DES_ENCRYPT);
	setup_des_key(&key[7], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8], &ks, DES_ENCRYPT);
	setup_des_key(&key[14], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16], &ks, DES_ENCRYPT);

	// With the normalized source we simply need to skip the
	// $MSCHAPv2$hhhhhhhhhhhhhhhh$ string to get 'real' binary data.
	// $NETNTLM$c75c20bff9baa71f4765f360625700b0$
	cp = &source[11];
	cp = strchr(cp, '$');
	++cp;
	for (i = 0; i < 24; ++i) {
		unsigned char c = (atoi16[ARCH_INDEX(*cp)] << 4) + (atoi16[ARCH_INDEX(*(cp+1))] );
		if (c != binary[i])
			return 0;
		cp += 2;
	}
	return 1;
}

static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}

/* Partial-hash accessors over the stored binary / computed crypt_key */
static int binary_hash_0(void *binary) { return *(unsigned short*)binary & 0xF; }
static int binary_hash_1(void *binary) { return *(unsigned short*)binary & 0xFF; }
static int binary_hash_2(void *binary) { return *(unsigned short*)binary & 0xFFF; }
static int binary_hash_3(void *binary) { return *(unsigned short*)binary & 0xFFFF; }
static int get_hash_0(int index) { return crypt_key[index] & 0xF; }
static int get_hash_1(int index) { return crypt_key[index] & 0xFF; }
static int get_hash_2(int index) { return crypt_key[index] & 0xFFF; }
static int get_hash_3(int index) { return crypt_key[index] & 0xFFFF; }

/* Registration: both formats share the crypt/compare machinery and differ
   only in prepare/valid/split/get_salt and the test vectors. */
struct fmt_main fmt_MSCHAPv2_new = {
	{
		CHAP_FORMAT_LABEL,
		CHAP_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if !defined(MMX_COEF) || (defined(MMX_COEF) && defined(SSE_OMP))
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		chap_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		chap_prepare,
		chap_valid,
		chap_split,
		get_binary,
		chap_get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		set_salt,
		set_key_ansi,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

struct fmt_main fmt_NETNTLM_new = {
	{
		NTLM_FORMAT_LABEL,
		NTLM_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if !defined(MMX_COEF) || (defined(MD4_SSE_PARA) && defined(SSE_OMP))
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		ntlm_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		ntlm_prepare,
		ntlm_valid,
		ntlm_split,
		get_binary,
		ntlm_get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		set_salt,
		set_key_ansi,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
mixed_tentusscher_myo_epi_2004_S3_5.c
// Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_5.h"

// Report model metadata (resting potential and number of state variables)
// to the framework.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Load the per-cell initial state. The mask in extra_data selects which
// cell type this sv_id is: 0 -> myocardium, otherwise -> epicardium.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    static bool first_call = true;

    if(first_call) {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0) {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V; // V; millivolt
        sv[1] = 0.f; //M
        sv[2] = 0.75; //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f; //Xr1
        sv[5] = 1.f; //Xr2
        sv[6] = 0.f; //Xs
        sv[7] = 1.f; //S
        sv[8] = 0.f; //R
        sv[9] = 0.f; //D
        sv[10] = 1.f; //F
        sv[11] = 1.f; //FCa
        sv[12] = 1.f; //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f; //CaSR
        sv[15] = 11.6f; //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        // (17 entries, same ordering as the commented block above)
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V; // V; millivolt
        sv[1] = 0.f; //M
        sv[2] = 0.75; //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f; //Xr1
        sv[5] = 1.f; //Xr2
        sv[6] = 0.f; //Xs
        sv[7] = 1.f; //S
        sv[8] = 0.f; //R
        sv[9] = 0.f; //D
        sv[10] = 1.f; //F
        sv[11] = 1.f; //FCa
        sv[12] = 1.f; //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f; //CaSR
        sv[15] = 11.6f; //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5624423262106,0.00129167275470664,0.779566791615456,0.779410913961902,0.000174854203482290,0.485029627881613,0.00294151853152467,0.999998346761065,1.93536430763538e-08,1.89253322511936e-05,0.999770718781989,1.00705290160006,0.999994726153831,4.61702239557613e-05,0.428543055333246,10.5677098069322,139.088209861540};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance every requested cell by num_steps explicit steps of size dt,
// dispatching each cell to the myocardium or epicardium right-hand side
// according to the mask in extra_data.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j) {
            // NOTE(review): the mask is indexed with the loop index i here,
            // while set_model_initial_conditions_cpu indexes it with sv_id.
            // These differ whenever cells_to_solve is non-NULL — confirm
            // which indexing the mask function expects.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a myocardium cell. RHS_cpu_myo returns the *updated*
// state in rDY (not derivatives), so it is copied straight back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium right-hand side. Computes the state after one
// step of size dt: gates via the Rush-Larsen exponential update, voltage and
// ionic concentrations via forward Euler, and writes the result to rDY_.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // Analytic solution of the quadratic buffering equilibrium for CaSR
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // ... and the same for cytosolic Ca
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen: exact update for linear gate ODEs)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only decrease while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One time step for an epicardium cell. RHS_cpu_epi returns the *updated*
// state in rDY (not derivatives), so it is copied straight back into sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium right-hand side. Same structure as
// RHS_cpu_myo, but with epicardial Gks/transient-outward kinetics and with
// most conductances overridden by a fitted parameter set (see parameters[]).
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set (Scenario 3) overriding the defaults above.
    // Order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca,
    // Vmaxup, GpCa, arel, crel, Vleak.
    real parameters []={14.4147498667915,0.000251124617142363,0.000137013485769181,0.000169470481210674,0.253895228590867,0.152733305273802,0.167845467014990,4.51645656101753,0.0160856241829014,1.32915393527547,1099.63821888814,0.000521101954310938,0.130615895825142,0.0198817586800201,0.00476047046076979,6.04955465909554e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release/leak use the fitted arel/crel/Vleak instead of the
    // hard-coded constants of the myocardium RHS
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen: exact update for linear gate ODEs)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only decrease while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
map_alm_qu_eb.h
/**************************************************************
 Compute map2alm_pol and alm2map_pol for QU and EB only
***************************************************************/

#ifdef UPDATE_HEALPIX3_60_SOLVED

#include <vector>
//#include "alm_healpix_tools.h"
//#include "alm_map_tools.h"
#include "alm.h"
#include "arr.h"
// #include "fftpack_support.h"
#include "ylmgen.h"
#include "healpix_map.h"
#include "xcomplex.h"

using namespace std;

/*! A class holding information about a ring of pixels in a spherical map. */
class ringinfo
  {
  public:
    double theta, phi0, weight, cth, sth;
    int nph, ofs;

    ringinfo() : nph(0) {}
    //! Constructs a \a ringinfo object.
    // \param theta_ colatitude of the ring (in radian)
    // \param phi0_ longitude of the first pixel in the ring (in radian)
    // \param weight_ weighting factor for all pixels in the ring. This is
    //   typically the surface of a pixel in sterad.
    // \note \a weight_ is only needed for map analysis, not synthesis.
    // \param nph_ number of pixels in the ring
    // \param ofs_ index of the first ring pixel in the total map array
    //   (counting from zero)
    ringinfo (double theta_, double phi0_, double weight_, int nph_, int ofs_)
      : theta(theta_), phi0(phi0_), weight(weight_),
        cth(cos(theta)), sth(sin(theta)), nph(nph_), ofs(ofs_)
      {}
  };

/*! A class holding information about a ring pair in a spherical map. */
class ringpair
  {
  public:
    ringinfo r1, r2;

    //! Initialize the object with the ring described by \a info.
    //! The second ring is left empty (r2.nph == 0 marks "unpaired").
    ringpair (const ringinfo &info) : r1(info) {}
    //! Initialize the object with the rings described by \a info1 and \a info2.
    // \note The colatitude of \a info2 must be \f$\pi\f$ minus the colatitude
    //   of \a info1.
    ringpair (const ringinfo &info1, const ringinfo &info2)
      : r1(info1), r2(info2)
      {
      planck_assert( approx( r1.theta, pi-r2.theta, 1e-10 ), "invalid ringpair" );
      }
  };

namespace {

// Orders rings by sin(colatitude).
struct info_comparator
  {
  inline bool operator()( const ringinfo &a, const ringinfo &b )
    { return a.sth<b.sth; }
  };

// Orders ring pairs by pixel count, then by starting longitude.
struct pair_comparator
  {
  inline bool operator()(const ringpair &a, const ringpair &b)
    {
    if( a.r1.nph == b.r1.nph ) {
      return a.r1.phi0<b.r1.phi0;
    } else {
      return a.r1.nph<b.r1.nph;
    }
    }
  };

// Recursion factors 2*sqrt((2l+1)/(2l-1)*(l^2-m^2)) used by the
// polarization lambda_w/lambda_x recurrences (zero below l=2).
void init_lam_fact_1d( int m, arr<double> &lam_fact )
  {
  for( int l=m; l < (int) lam_fact.size(); ++l) {
    lam_fact[l] = (l<2) ? 0. : 2*sqrt((2*l+1.)/(2*l-1.) * (l*l-m*m));
  }
  }

// Same factors without the factor 2, for derivative computations.
void init_lam_fact_deriv_1d( int m, arr<double> &lam_fact )
  {
  lam_fact[m]=0;
  for( int l=m+1; l < (int) lam_fact.size(); ++l) {
    lam_fact[l] = sqrt((2*l+1.)/(2*l-1.) * (l*l-m*m));
  }
  }

// Spin-2 normalisation sqrt(1/((l+2)(l+1)l(l-1))), zero below l=2.
void init_normal_l( arr<double> &normal_l )
  {
  for( int l=0; l < (int) normal_l.size(); ++l) {
    normal_l[l] = (l<2) ? 0. : sqrt(1./((l+2.)*(l+1.)*l*(l-1.)));
  }
  }

// Split nrings into chunks of roughly max(100, nrings/10) rings each.
void get_chunk_info( int nrings, int &nchunks, int &chunksize )
  {
  nchunks = nrings/max(100,nrings/10) + 1;
  chunksize = ( nrings + nchunks -1 )/nchunks;
  }

// Helper converting between a ring of map pixels and its Fourier phases.
class ringhelper
  {
  private:
    double phi0_;
    arr<xcomplex<double> > shiftarr, work;
    rfft plan;
    bool norot;

    // (Re)build the phase-shift table and FFT plan for a ring geometry.
    void update( int nph, int mmax, double phi0 )
      {
      norot = ( abs(phi0) < 1e-14 );
      if( !norot ) {
        if( ( mmax != (int) shiftarr.size()-1 ) || ( !approx( phi0, phi0_, 1e-12 ) ) ) {
          shiftarr.alloc(mmax+1);
          phi0_ = phi0;
          for( int m=0; m <= mmax; ++m) {
            shiftarr[m] = xcomplex<REAL> (cos(m*phi0), sin(m*phi0) );
          }
        }
      }
      if( nph != (int) plan.size() ) { plan.Set(nph); }
      if( nph > (int) work.size() ) { work.alloc(2*nph); }
      }

  public:
    ringhelper() : phi0_(0), norot(true) {}

    // Synthesis: fold the m-phases into nph FFT bins (aliasing m onto
    // m mod nph) and inverse-FFT to real pixel values.
    template<typename T> void phase2ring( int nph, int mmax, double phi0,
      const xcomplex<double> *phase, T *ring )
      {
      update( nph, mmax, phi0 );
      for( int m=1; m < nph; ++m ) { work[m]=0; }
      work[0] = phase[0];
      if(norot) {
        for( int m=1; m <= mmax; ++m ) {
          work[m%nph] += phase[m];
          work[nph-1-((m-1)%nph)] += conj(phase[m]);
        }
      } else {
        for( int m=1; m <= mmax; ++m ) {
          xcomplex<double> tmp = phase[m]*shiftarr[m];
          work[m%nph] += tmp;
          work[nph-1-((m-1)%nph)] += conj(tmp);
        }
      }
      plan.backward_c( work );
      for( int m=0; m < nph; ++m ) { ring[m] = real(work[m]); }
      }

    template<typename T> void phase2ring( int mmax,
      const xcomplex<double> *phase, const ringinfo &info, T *data )
      {
      if( info.nph > 0 ) {
        phase2ring( info.nph, mmax, info.phi0, phase, data+info.ofs );
      }
      }

    template<typename T> void phase2pair( int mmax,
      const xcomplex<double> *phase1, const xcomplex<double> *phase2,
      const ringpair &pair, T *data )
      {
      phase2ring( mmax, phase1, pair.r1, data );
      phase2ring( mmax, phase2, pair.r2, data );
      }

    // Analysis: FFT a weighted ring and read off the (possibly phase
    // shifted) m-coefficients.
    template<typename T> void ring2phase( int nph, int mmax, double phi0,
      double weight, const T *ring, xcomplex<double> *phase )
      {
      update( nph, mmax, -phi0 );
      for( int m=0; m < nph; ++m ) { work[m] = ring[m]*weight; }
      plan.forward_c( work );
      if( norot ) {
        for( int m=0; m <= mmax; ++m ) { phase[m] = work[m%nph]; }
      } else {
        for( int m=0; m <= mmax; ++m ) { phase[m] = work[m%nph]*shiftarr[m]; }
      }
      }

    template<typename T> void ring2phase( int mmax, const ringinfo &info,
      const T *data, xcomplex<double> *phase )
      {
      if( info.nph > 0 ) {
        ring2phase( info.nph, mmax, info.phi0, info.weight, data+info.ofs, phase );
      }
      }

    template<typename T> void pair2phase( int mmax, const ringpair &pair,
      const T *data, xcomplex<double> *phase1, xcomplex<double> *phase2 )
      {
      ring2phase( mmax, pair.r1, data, phase1 );
      ring2phase( mmax, pair.r2, data, phase2 );
      }
  };

// Build the ring-pair description of a HEALPix map: 2*nside-1 mirrored
// pairs plus the unpaired equatorial ring.
void healpix2ringpairs( const Healpix_Base &base, const arr<double> &weight,
  std::vector<ringpair> &pair )
  {
  pair.clear();
  int startpix, ringpix;
  double theta, wgt, phi0;
  bool shifted;
  int nside = base.Nside();
  for(int m=0; m < 2*nside-1; ++m) {
    base.get_ring_info2( m+1, startpix, ringpix, theta, shifted );
    wgt = weight[m]*fourpi/base.Npix();
    phi0 = shifted ? pi/ringpix : 0;
    pair.push_back( ringpair(
      ringinfo( theta, phi0, wgt, ringpix, startpix ),
      ringinfo( pi-theta, phi0, wgt, ringpix, base.Npix()-startpix-ringpix ) ) );
  }
  base.get_ring_info2( 2*nside, startpix, ringpix, theta, shifted );
  wgt = weight[2*nside-1]*fourpi/base.Npix();
  phi0 = shifted ? pi/ringpix : 0;
  pair.push_back( ringpair( ringinfo( theta, phi0, wgt, ringpix, startpix ) ) );
  }

// Overload using unit ring weights.
void healpix2ringpairs( const Healpix_Base &base, std::vector<ringpair> &pair )
  {
  arr<double> wgt( 2*base.Nside() );
  wgt.fill(1);
  healpix2ringpairs( base, wgt, pair );
  }

} // namespace

/*
void info2pair( const std::vector<ringpair> &info, std::vector<ringpair> &pair )
  {
  pair.clear();
  vector<ringinfo> info2=info;
  sort( info2.begin(), info2.end(), info_comparator() );
  unsigned int pos=0;
  while( pos < info2.size()-1 ) {
    if( approx( info2[pos].cth, -info2[pos+1].cth, 1e-12 ) ) {
      pair.push_back( ringpair( info2[pos], info2[pos+1] ) );
      pos += 2;
    } else {
      pair.push_back( ringpair( info2[pos] ) );
      ++pos;
    }
  }
  if( pos < info2.size() ) { pair.push_back(info2[pos]); }
  sort( pair.begin(), pair.end(), pair_comparator() );
  }
*/

/***************************************************************************/

//void alm2map_pol_QU( /*const Alm<xcomplex<T> > &almT,*/ const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, const vector<ringpair> &pair, /* T *mapT,*/ T *mapQ, T *mapU )
//
// Synthesize Q/U maps from E/B a_lm (temperature part of the standard
// alm2map_pol dropped). Inside the m/ring loops the repeated stanzas are
// manual expansions of ALM2MAP_POL_MACRO_QU(Qa,Qb,Ua,Ub):
//   Qa +=  Re/Im( almE * lambda_w );  Ua -= Re/Im( almB * lambda_w );
//   Qb += (-Im,+Re)( almB * lambda_x );  Ub += (-Im,+Re)( almE * lambda_x );
// with lambda_w/lambda_x purely real, alternating the (north,south) targets
// for even/odd l-m so north = even+odd, south = even-odd parity sums.
template<typename T> void alm2map_pol_QU( const Alm<xcomplex<T> > &almE,
  const Alm<xcomplex<T> > &almB, const std::vector<ringpair> &pair,
  T *mapQ, T *mapU )
  {
  int lmax = almE.Lmax();
  int mmax = almE.Mmax();

  planck_assert( almE.conformable(almB), "alm2map_pol: a_lm are not conformable" );

  arr<double> normal_l (lmax+1);
  init_normal_l( normal_l );

  int nchunks, chunksize;
  get_chunk_info( pair.size(), nchunks, chunksize );

  arr2<xcomplex<double> > phas1Q(chunksize,mmax+1), phas2Q(chunksize,mmax+1),
    phas1U(chunksize,mmax+1), phas2U(chunksize,mmax+1);

  for(int chunk=0; chunk < nchunks; ++chunk) {
    int llim = chunk*chunksize, ulim = min( llim+chunksize, int(pair.size()) );

#pragma omp parallel
{
    Ylmgen generator( lmax, mmax, 1e-30 );
    arr<double> Ylm;
    arr<double> lam_fact (lmax+1);
    arr<xcomplex<double>[2]> alm_tmp(lmax+1);
    int m;
#pragma omp for schedule(dynamic,1)
    for( m=0; m <= mmax; ++m ) {
      int m2 = m*m;
      init_lam_fact_1d( m, lam_fact );
      // Pre-scale the a_lm by the spin-2 normalisation.
      for(int l=m; l <= lmax; ++l) {
        alm_tmp[l][0] = almE(l,m) * xcomplex<REAL>(-normal_l[l],0.);
        alm_tmp[l][1] = almB(l,m) * xcomplex<REAL>(-normal_l[l],0.);
      }
      for(int ith=0; ith < ulim-llim; ++ith) {
        double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth;
        int l;
        generator.get_Ylm( cth, sth, m, Ylm, l );
        if( l <= lmax ) {
          double one_on_s2 = 1/(sth*sth);
          double c_on_s2 = cth * one_on_s2;
          double two_on_s2 = 2*one_on_s2;
          double m_on_s2 = m*one_on_s2;
          double twocth = 2*cth;

          if( pair[ith+llim].r2.nph > 0 ) {
            // Paired rings: accumulate even (1) and odd (2) parity sums.
            xcomplex<double> Q1=0, Q2=0, U1=0, U2=0;
            double lam_lm = 0;
            if( (l-m)&1 ) {
              //ALM2MAP_POL_MACRO_QU(Q2,Q1,U2,U1)
              double lam_lm1m = lam_lm;
              lam_lm = Ylm[l];
              double t1 = lam_lm1m*lam_fact[l];
              double a_w = (l-m2)*two_on_s2 + l*(l-1);
              double a_x = twocth*(l-1)*lam_lm;
              xcomplex<double> lambda_w = xcomplex<double>(a_w*lam_lm - t1*c_on_s2,0.);
              xcomplex<double> lambda_x = xcomplex<double>(m_on_s2 * (a_x-t1),0.);
              Q2 += alm_tmp[l][0] * lambda_w;
              U2 -= alm_tmp[l][1] * lambda_w;
              // FIX: original read "xcomplex<REAL>( - imag(alm_tmp[l][1]) lambda_x), real(...)"
              // (missing '*' and misplaced ')'); normalized to the product form.
              Q1 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1] * lambda_x));
              U1 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0] * lambda_x));
              ++l;
            }
            for( ; l < lmax ; ) {
              //ALM2MAP_POL_MACRO_QU(Q1,Q2,U1,U2)
              double lam_lm1m = lam_lm;
              lam_lm = Ylm[l];
              double t1 = lam_lm1m*lam_fact[l];
              double a_w = (l-m2)*two_on_s2 + l*(l-1);
              double a_x = twocth*(l-1)*lam_lm;
              xcomplex<double> lambda_w = xcomplex<double> (a_w*lam_lm - t1*c_on_s2,0.);
              xcomplex<double> lambda_x = xcomplex<double> (m_on_s2 * (a_x-t1),0.);
              Q1 += alm_tmp[l][0] * lambda_w;
              // FIX: was "U1 += ..."; every sibling branch (and the original
              // .re/.im reference code) subtracts the B*lambda_w term.
              U1 -= alm_tmp[l][1] * lambda_w;
              Q2 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1] * lambda_x));
              U2 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0] * lambda_x));
              ++l;
              //ALM2MAP_POL_MACRO_QU(Q2,Q1,U2,U1)
              lam_lm1m = lam_lm;
              lam_lm = Ylm[l];
              t1 = lam_lm1m*lam_fact[l];
              a_w = (l-m2)*two_on_s2 + l*(l-1);
              a_x = twocth*(l-1)*lam_lm;
              lambda_w = a_w*lam_lm - t1*c_on_s2;
              lambda_x = m_on_s2 * (a_x-t1);
              // FIX: was "* xcomplex<REAL>(lambda_w,0.)" — lambda_w is already
              // complex here; multiply directly.
              Q2 += alm_tmp[l][0] * lambda_w;
              U2 -= alm_tmp[l][1] * lambda_w;
              Q1 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1] * lambda_x));
              U1 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0] * lambda_x));
              ++l;
            }
            if( l == lmax ) {
              //ALM2MAP_POL_MACRO_QU(Q1,Q2,U1,U2)
              double lam_lm1m = lam_lm;
              lam_lm = Ylm[l];
              double t1 = lam_lm1m*lam_fact[l];
              double a_w = (l-m2)*two_on_s2 + l*(l-1);
              double a_x = twocth*(l-1)*lam_lm;
              xcomplex<double> lambda_w = xcomplex<double>(a_w*lam_lm - t1*c_on_s2,0.);
              xcomplex<double> lambda_x = xcomplex<double>(m_on_s2 * (a_x-t1),0.);
              Q1 += alm_tmp[l][0] * lambda_w;
              U1 -= alm_tmp[l][1] * lambda_w;
              // FIX: "imag(a) * lambda_x" forms produced complex ctor
              // arguments; normalized to the product form (lambda_x is real).
              Q2 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1] * lambda_x));
              U2 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0] * lambda_x));
              ++l;
            }
            phas1Q[ith][m] =-Q1-Q2;
            phas2Q[ith][m] =-Q1+Q2;
            phas1U[ith][m] = U1+U2;
            phas2U[ith][m] = U1-U2;
          }
          else {
            // Unpaired (equatorial) ring: both parities accumulate into the
            // same Q1/U1 sums — intentional, there is no mirror ring.
            xcomplex<double> Q1=0, U1=0;
            double lam_lm = 0;
            for( ; l <= lmax; ) {
              //ALM2MAP_POL_MACRO_QU(Q1,Q1,U1,U1)
              double lam_lm1m = lam_lm;
              lam_lm = Ylm[l];
              double t1 = lam_lm1m*lam_fact[l];
              double a_w = (l-m2)*two_on_s2 + l*(l-1);
              double a_x = twocth*(l-1)*lam_lm;
              xcomplex<REAL> lambda_w = xcomplex<REAL>(a_w*lam_lm - t1*c_on_s2,0.);
              xcomplex<REAL> lambda_x = xcomplex<REAL>(m_on_s2 * (a_x-t1),0.);
              Q1 += alm_tmp[l][0] * lambda_w;
              U1 -= alm_tmp[l][1] * lambda_w;
              Q1 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1] * lambda_x));
              U1 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0] * lambda_x));
              ++l;
            }
            phas1Q[ith][m] =-Q1;
            phas1U[ith][m] = U1;
          }
        }
        else {
          phas1Q[ith][m] = phas2Q[ith][m] = 0;
          phas1U[ith][m] = phas2U[ith][m] = 0;
        }
      }
    }
} // end of parallel region

#pragma omp parallel
{
    ringhelper helper;
    int ith;
#pragma omp for schedule(dynamic,1)
    for( ith=llim; ith < ulim; ++ith ) {
      helper.phase2pair( mmax, phas1Q[ith-llim], phas2Q[ith-llim], pair[ith], mapQ );
      helper.phase2pair( mmax, phas1U[ith-llim], phas2U[ith-llim], pair[ith], mapU );
    }
} // end of parallel region
  }
  }

template void alm2map_pol_QU( const Alm<xcomplex<float> > &almE,
  const Alm<xcomplex<float> > &almB, const std::vector<ringpair> &pair,
  float *mapQ, float *mapU );
template void alm2map_pol_QU( const Alm<xcomplex<double> > &almE,
  const Alm<xcomplex<double> > &almB, const std::vector<ringpair> &pair,
  double *mapQ, double *mapU );

//void map2alm_pol_QU( const vector<ringpair> &pair, /*const T *mapT,*/ const T *mapQ, const T *mapU, /*Alm<xcomplex<T> > &almT,*/ Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, bool add_alm )
template<typename T> void map2alm_pol_QU( const std::vector<ringpair> &pair,
  const T *mapQ, const T *mapU, Alm<xcomplex<T> > &almE,
  Alm<xcomplex<T> > &almB, bool add_alm )
  {
  planck_assert( almE.conformable(almB), "map2alm_pol: a_lm are not conformable" );

  int lmax = almE.Lmax(), mmax = almE.Mmax();

  arr<double> normal_l (lmax+1);
  init_normal_l( normal_l );

  int nchunks, chunksize;
  get_chunk_info( pair.size(), nchunks, chunksize );

  arr2<xcomplex<double> > phas1Q(chunksize,mmax+1), phas2Q(chunksize,mmax+1),
phas1U(chunksize,mmax+1), phas2U(chunksize,mmax+1);//phas1T(chunksize,mmax+1), phas2T(chunksize,mmax+1) if( !add_alm ) { //almT.SetToZero(); almE.SetToZero(); almB.SetToZero(); } for( int chunk=0; chunk < nchunks; ++chunk ) { int llim = chunk*chunksize, ulim = min( llim+chunksize, int(pair.size()) ); #pragma omp parallel { ringhelper helper; int ith; #pragma omp for schedule(dynamic,1) for( ith=llim; ith < ulim; ++ith) { //helper.pair2phase( mmax, pair[ith], mapT, phas1T[ith-llim], phas2T[ith-llim] ); helper.pair2phase( mmax, pair[ith], mapQ, phas1Q[ith-llim], phas2Q[ith-llim] ); helper.pair2phase( mmax, pair[ith], mapU, phas1U[ith-llim], phas2U[ith-llim] ); } } // end of parallel region #pragma omp parallel { Ylmgen generator( lmax, mmax, 1e-30 ); arr<double> Ylm; arr<double> lam_fact(lmax+1); arr<xcomplex<double>[2] > alm_tmp(lmax+1); int m; #pragma omp for schedule(dynamic,1) for( m=0; m <= mmax; ++m ) { init_lam_fact_1d( m, lam_fact ); for( int l=m; l < (int) alm_tmp.size(); ++l ) { alm_tmp[l][0] = alm_tmp[l][1] = 0; } for( int ith=0; ith < ulim-llim; ++ith ) { int l; double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth; generator.get_Ylm( cth, sth, m, Ylm, l ); if( l <= lmax ) { double one_on_s2 = 1/(sth*sth); double c_on_s2 = cth * one_on_s2; double two_on_s2 = 2*one_on_s2; double twocth = 2*cth; int m2 = m*m; double m_on_s2 = m*one_on_s2; if( pair[ith+llim].r2.nph > 0 ) { xcomplex<double> Q1 = phas1Q[ith][m]+phas2Q[ith][m], Q2 = phas1Q[ith][m]-phas2Q[ith][m], U1 = phas1U[ith][m]+phas2U[ith][m], U2 = phas1U[ith][m]-phas2U[ith][m]; //T1 = phas1T[ith][m]+phas2T[ith][m], T2 = phas1T[ith][m]-phas2T[ith][m] double lam_lm = 0; if( (l-m)&1 ) { //MAP2ALM_POL_MACRO_QU(Q2,Q1,U2,U1) double lam_lm1m=lam_lm; lam_lm=Ylm[l]; double t1 = lam_lm1m*lam_fact[l]; double a_w = (l-m2)*two_on_s2 + l*(l-1); double a_x = twocth*(l-1)*lam_lm; double lambda_w = a_w*lam_lm - t1*c_on_s2; double lambda_x = m_on_s2 * (a_x-t1); // alm_tmp[l][0].re += Q2.re*lambda_w - U1.im*lambda_x; 
// alm_tmp[l][0].im += Q2.im*lambda_w + U1.re*lambda_x; alm_tmp[l][0] += xcomplex<REAL>( real(Q2) * (REAL) lambda_x - imag(U1) * (REAL) lambda_x, imag(Q2) * (REAL) lambda_x + real(U1) * (REAL) lambda_x); alm_tmp[l][0] += Q2 * (REAL) lambda_w; alm_tmp[l][1] += xcomplex<REAL>( U2.real()*lambda_w + Q1.imag()*lambda_x, U2.imag()*lambda_w - Q1.real()*lambda_x); ++l; } for( ; l < lmax; ) { //MAP2ALM_POL_MACRO_QU(Q1,Q2,U1,U2) double lam_lm1m=lam_lm; lam_lm=Ylm[l]; double t1 = lam_lm1m*lam_fact[l]; double a_w = (l-m2)*two_on_s2 + l*(l-1); double a_x = twocth*(l-1)*lam_lm; double lambda_w = a_w*lam_lm - t1*c_on_s2; double lambda_x = m_on_s2 * (a_x-t1); // alm_tmp[l][0].re += Q1.re*lambda_w - U2.im*lambda_x; // alm_tmp[l][0].im += Q1.im*lambda_w + U2.re*lambda_x; alm_tmp[l][0] += xcomplex<REAL>( Q1.real()*lambda_w - U2.imag()*lambda_x, Q1.imag()*lambda_w + U2.real()*lambda_x); // alm_tmp[l][1].re += U1.re*lambda_w + Q2.im*lambda_x; // alm_tmp[l][1].im += U1.im*lambda_w - Q2.re*lambda_x; alm_tmp[l][1]+= xcomplex<REAL>(U1.real()*lambda_w + Q2.imag()*lambda_x,U1.imag()*lambda_w - Q2.real()*lambda_x); ++l; //MAP2ALM_POL_MACRO_QU(Q2,Q1,U2,U1) lam_lm1m=lam_lm; lam_lm=Ylm[l]; t1 = lam_lm1m*lam_fact[l]; a_w = (l-m2)*two_on_s2 + l*(l-1); a_x = twocth*(l-1)*lam_lm; lambda_w = a_w*lam_lm - t1*c_on_s2; lambda_x = m_on_s2 * (a_x-t1); // alm_tmp[l][0].re += Q2.re*lambda_w - U1.im*lambda_x; // alm_tmp[l][0].im += Q2.im*lambda_w + U1.re*lambda_x; alm_tmp[l][0] += xcomplex<REAL>(Q2.real()*lambda_w - U1.imag()*lambda_x, Q2.imag()*lambda_w + U1.real()*lambda_x); // alm_tmp[l][1].re += U2.re*lambda_w + Q1.im*lambda_x; // alm_tmp[l][1].im += U2.im*lambda_w - Q1.re*lambda_x; alm_tmp[l][1] += xcomplex<REAL>(U2.real()*lambda_w + Q1.imag()*lambda_x,U2.imag()*lambda_w - Q1.real()*lambda_x); ++l; } if( l == lmax ) { //MAP2ALM_POL_MACRO_QU(Q1,Q2,U1,U2) double lam_lm1m=lam_lm; lam_lm=Ylm[l]; double t1 = lam_lm1m*lam_fact[l]; double a_w = (l-m2)*two_on_s2 + l*(l-1); double a_x = twocth*(l-1)*lam_lm; 
double lambda_w = a_w*lam_lm - t1*c_on_s2; double lambda_x = m_on_s2 * (a_x-t1); // alm_tmp[l][0].re += Q1.re*lambda_w - U2.im*lambda_x; // alm_tmp[l][0].im += Q1.im*lambda_w + U2.re*lambda_x; alm_tmp[l][0] += xcomplex<REAL>(Q1.real()*lambda_w - U2.imag()*lambda_x,Q1.imag()*lambda_w + U2.real()*lambda_x); // alm_tmp[l][1].re += U1.re*lambda_w + Q2.im*lambda_x; // alm_tmp[l][1].im += U1.im*lambda_w - Q2.re*lambda_x; alm_tmp[l][1] += xcomplex<REAL>(U1.real()*lambda_w + Q2.imag()*lambda_x,U1.imag()*lambda_w - Q2.real()*lambda_x); ++l; } } else { xcomplex<double> Q1 = phas1Q[ith][m], U1 = phas1U[ith][m]; //T1 = phas1T[ith][m] double lam_lm = 0; for( ; l <= lmax; ) { //MAP2ALM_POL_MACRO_QU(Q1,Q1,U1,U1) double lam_lm1m=lam_lm; lam_lm=Ylm[l]; double t1 = lam_lm1m*lam_fact[l]; double a_w = (l-m2)*two_on_s2 + l*(l-1); double a_x = twocth*(l-1)*lam_lm; double lambda_w = a_w*lam_lm - t1*c_on_s2; double lambda_x = m_on_s2 * (a_x-t1); // alm_tmp[l][0].re += Q1.re*lambda_w - U1.im*lambda_x; // alm_tmp[l][0].im += Q1.im*lambda_w + U1.re*lambda_x; alm_tmp[l][0] += xcomplex<REAL>(Q1.real()*lambda_w - U1.imag()*lambda_x,Q1.imag()*lambda_w + U1.real()*lambda_x); // alm_tmp[l][1].re += U1.re*lambda_w + Q1.im*lambda_x; // alm_tmp[l][1].im += U1.im*lambda_w - Q1.re*lambda_x; alm_tmp[l][1] += xcomplex<REAL>(U1.real()*lambda_w + Q1.imag()*lambda_x,U1.imag()*lambda_w - Q1.real()*lambda_x); ++l; } } } } xcomplex<T> *palmE=almE.mstart(m), *palmB=almB.mstart(m); // *palmT=almT.mstart(m) for( int l=m; l <= lmax; ++l ) { //palmT[l].re += alm_tmp[l][0].re; //palmT[l].im += alm_tmp[l][0].im; // palmE[l].re += alm_tmp[l][0].re*normal_l[l]; // palmE[l].im += alm_tmp[l][0].im*normal_l[l]; palmE[l] += xcomplex<REAL>(alm_tmp[l][0].real() *normal_l[l], alm_tmp[l][0].imag() *normal_l[l]); // palmB[l].re += alm_tmp[l][1].re*normal_l[l]; // palmB[l].im += alm_tmp[l][1].im*normal_l[l]; almB[l] += xcomplex<REAL>(alm_tmp[l][1].real()*normal_l[l], alm_tmp[l][1].imag()*normal_l[l]); } } } // end of parallel 
region } } template void map2alm_pol_QU( const std::vector<ringpair> &pair, const float *mapQ, const float *mapU, Alm<xcomplex<float> > &almE, Alm<xcomplex<float> > &almB, bool add_alm ); template void map2alm_pol_QU( const std::vector<ringpair> &pair, const double *mapQ, const double *mapU, Alm<xcomplex<double> > &almE, Alm<xcomplex<double> > &almB, bool add_alm ); /*********************************************************************************************************************************************************************/ //void alm2map_pol_QU( /*const Alm<xcomplex<T> > &almT,*/ const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, /*Healpix_Map<T> &mapT,*/ Healpix_Map<T> &mapQ, Healpix_Map<T> &mapU ) template<typename T> void alm2map_pol_QU( const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, Healpix_Map<T> &mapQ, Healpix_Map<T> &mapU ) { /* planck_assert( mapT.Scheme()==RING, "alm2map_pol: maps must be in RING scheme" ); planck_assert( mapT.conformable( mapQ ) && mapT.conformable( mapU ), "alm2map_pol: maps are not conformable" ); */ planck_assert( mapQ.Scheme()==RING, "alm2map_pol: maps must be in RING scheme" ); planck_assert( mapQ.conformable( mapU ), "alm2map_pol: maps are not conformable" ); std::vector<ringpair> pair; healpix2ringpairs( mapQ, pair ); alm2map_pol_QU( almE, almB, pair, &mapQ[0], &mapU[0] ); } template void alm2map_pol_QU( const Alm<xcomplex<float> > &almE, const Alm<xcomplex<float> > &almB, Healpix_Map<float> &mapQ, Healpix_Map<float> &mapU ); template void alm2map_pol_QU( const Alm<xcomplex<double> > &almE, const Alm<xcomplex<double> > &almB, Healpix_Map<double> &mapQ, Healpix_Map<double> &mapU ); //void map2alm_pol_QU( /*const Healpix_Map<T> &mapT,*/ const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, /*Alm<xcomplex<T> > &almT,*/ Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, const arr<double> &weight, bool add_alm ) template<typename T> void map2alm_pol_QU( const Healpix_Map<T> &mapQ, const Healpix_Map<T> 
&mapU, Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, const arr<double> &weight, bool add_alm=false ) { /* planck_assert( mapT.Scheme()==RING, "map2alm_pol: maps must be in RING scheme" ); planck_assert( mapT.conformable( mapQ ) && mapT.conformable( mapU ), "map2alm_pol: maps are not conformable" ); planck_assert( weight.size() >= 2*mapT.Nside(), "map2alm_pol: at least one weight array has too few entries" ); */ planck_assert( mapQ.Scheme()==RING, "map2alm_pol: maps must be in RING scheme" ); planck_assert( mapQ.conformable( mapU ), "map2alm_pol: maps are not conformable" ); planck_assert( weight.size() >= (unsigned long) 2*mapQ.Nside(), "map2alm_pol: at least one weight array has too few entries" ); std::vector<ringpair> pair; healpix2ringpairs( mapQ, weight, pair ); map2alm_pol_QU( pair, &mapQ[0], &mapU[0], almE, almB, add_alm ); } template void map2alm_pol_QU( const Healpix_Map<float> &mapQ, const Healpix_Map<float> &mapU, Alm<xcomplex<float> > &almE, Alm<xcomplex<float> > &almB, const arr<double> &weight, bool add_alm ); template void map2alm_pol_QU( const Healpix_Map<double> &mapQ, const Healpix_Map<double> &mapU, Alm<xcomplex<double> > &almE, Alm<xcomplex<double> > &almB, const arr<double> &weight, bool add_alm ); //void map2alm_pol_iter_QU( /*const Healpix_Map<T> &mapT,*/ const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, /*Alm<xcomplex<T> > &almT,*/ Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, int num_iter, const arr<double> weight ) template<typename T> void map2alm_pol_iter_QU( const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, int num_iter, const arr<double> weight ) { map2alm_pol_QU( mapQ, mapU, almE, almB, weight ); for(int iter=1; iter <= num_iter; ++iter) { Healpix_Map<T> mapQ2( mapQ.Nside(), mapQ.Scheme(), SET_NSIDE ), mapU2( mapQ.Nside(), mapQ.Scheme(), SET_NSIDE );//mapT2( mapT.Nside(), mapT.Scheme(), SET_NSIDE ) alm2map_pol_QU( almE, almB, mapQ2, mapU2 ); for(int m=0; m < 
mapQ.Npix(); ++m) { //mapT2[m] = mapT[m]-mapT2[m]; mapQ2[m] = mapQ[m]-mapQ2[m]; mapU2[m] = mapU[m]-mapU2[m]; } map2alm_pol_QU( mapQ2, mapU2, almE, almB, weight, true ); } } //template void map2alm_pol_iter_QU( const Healpix_Map<float> &mapQ, const Healpix_Map<float> &mapU, Alm<xcomplex<float> > &almE, Alm<xcomplex<float> > &almB, int num_iter, const arr<double> &weight ); //template void map2alm_pol_iter_QU( const Healpix_Map<double> &mapQ, const Healpix_Map<double> &mapU, Alm<xcomplex<double> > &almE, Alm<xcomplex<double> > &almB, int num_iter, const arr<double> &weight ); #endif
/* ===================== file: indicator_kriging.h ===================== */
/*
   Copyright 2009 HPGL Team
   This file is part of HPGL (High Perfomance Geostatistics Library).
   HPGL is free software: you can redistribute it and/or modify it under the
   terms of the BSD License.
   You should have received a copy of the BSD License along with HPGL.
*/

#ifndef __GSALGO_INDICATOR_KRIGING_COMMAND_H__56F6BFCF_ACC9_4F63_A197_57316908E436__
#define __GSALGO_INDICATOR_KRIGING_COMMAND_H__56F6BFCF_ACC9_4F63_A197_57316908E436__

#include <ik_params.h>
#include <sugarbox_grid.h>
#include "progress_reporter.h"
#include "cdf_utils.h"
#include "pretty_printer.h"
#include "my_kriging_weights.h"
#include "precalculated_covariance.h"
#include "kriging_interpolation.h"
#include "neighbourhood_lookup.h"
#include "is_informed_predicate.h"
#include "cov_model.h"

namespace hpgl
{
  // One marginal probability per indicator category.
  typedef std::vector<indicator_probability_t> marginal_probs_t;

  /*template< typename grid_t, typename data_t, typename means_t, typename weight_calculator_t>*/

  namespace detail
  {
    // Builds one precalculated covariance model per indicator category from
    // the per-category covariance parameters and search radiuses in `params`.
    // (The typo "precalucated" in the name is kept: it is part of the
    // interface used below and possibly by other translation units.)
    template<typename cov_t>
    void create_precalucated_cov_models(const ik_params_t & params, std::vector<cov_t> & result)
    {
      result.resize(params.m_category_count);
      for (int i = 0; i < params.m_category_count; ++i)
      {
        result[i].init(cov_model_t(params.m_cov_params[i]), params.m_radiuses[i]);
      }
    }

    // Registers every informed (already sampled) node of `prop` with each
    // category's neighbourhood lookup, so searches only ever return nodes
    // that carry data.
    template<typename nl_t, typename prop_t>
    void add_defined_cells(std::vector<nl_t> & neighbour_lookups, const prop_t & prop)
    {
      for (int node = 0, end_node = prop.size(); node < end_node; ++node)
      {
        if (prop.is_informed(node))
        {
          for (int i = 0, end_i = neighbour_lookups.size(); i < end_i; ++i)
          {
            neighbour_lookups[i].add_node(node);
          }
        }
      }
    }
  }

  // Indicator kriging over a grid: for every node, estimate the probability
  // of each category via simple-kriging interpolation of that category's
  // indicator data, then assign the most probable category to the output.
  //
  // input_property  - observed indicator data (informed nodes only)
  // grid            - geometry used for neighbourhood searches / progress
  // params          - per-category covariances, radiuses, neighbourhood params
  // mps             - per-category marginal probabilities, used both as the
  //                   kriging mean and as the fallback when interpolation
  //                   fails.  NOTE(review): the fallback indexes mps[idx]
  //                   with node_idx, so each entry is assumed to be
  //                   node-addressable -- confirm against callers.
  // output_property - receives the winning category per node
  template<typename grid_t, typename marginal_probs_t>
  void do_indicator_kriging(
      const indicator_property_array_t & input_property,
      const grid_t & grid,
      const ik_params_t & params,
      const marginal_probs_t & mps,
      indicator_property_array_t & output_property)
  {
    using namespace hpgl::detail;
    print_algo_name("Indicator Kriging");
    print_params(params);
    progress_reporter_t report(grid.size());

    // One precalculated covariance + neighbourhood lookup per category.
    typedef precalculated_covariances_t cov_t;
    std::vector<cov_t> covariances;
    create_precalucated_cov_models(params, covariances);
    typedef indexed_neighbour_lookup_t<grid_t, cov_t> nl_t;
    std::vector<nl_t> nblookups;
    for (int i = 0; i < params.m_category_count; ++i)
    {
      nblookups.push_back(nl_t(&grid, &covariances[i], params.m_nb_params[i]));
    }
    add_defined_cells(nblookups, input_property);

    // Adapter i exposes input_property as the 0/1 indicator of category i.
    std::vector<indicator_array_adapter_t> ind_props;
    for (int i = 0; i < params.m_category_count; ++i)
    {
      ind_props.push_back(indicator_array_adapter_t(&input_property, i));
    }
    report.start();
    size_t size = input_property.size();
#pragma omp parallel
    {
#pragma omp for
      for (node_index_t node_idx = 0; node_idx < size; ++node_idx)
      {
        std::vector<indicator_probability_t> probs;
        for (int idx = 0; idx < params.m_category_count; ++idx)
        {
          indicator_probability_t prob;
          ki_result_t ki_result = kriging_interpolation(ind_props[idx],
              is_informed_predicate_t<indicator_property_array_t>(input_property),
              node_idx, covariances[idx], mps[idx], nblookups[idx],
              sk_weight_calculator_t(), prob);
          // Fall back to the marginal probability when kriging has no
          // usable neighbourhood at this node.
          if (ki_result != KI_SUCCESS)
          {
            prob = mps[idx][node_idx];
          }
          probs.push_back(prob);
        }
        output_property.set_at(node_idx, most_probable_category(probs));
        // Serialized: progress_reporter_t is shared by all threads.
#pragma omp critical
        {
          report.next_lap();
        }
      }
    }
    report.stop();
    std::cout << "\nDone. Average speed: " << report.iterations_per_second() << " point/sec." << std::endl;
  }
}

#endif //__GSALGO_INDICATOR_KRIGING_COMMAND_H__56F6BFCF_ACC9_4F63_A197_57316908E436__
/* ===================== file: mxnet_op.h ===================== */
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include <limits> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda/utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template <typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template <> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template <> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: { \ const OpReqType ReqType = kWriteTo; \ { __VA_ARGS__ } \ } break; \ case kAddTo: { \ const OpReqType ReqType = kAddTo; \ { __VA_ARGS__ } \ } break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: { \ const OpReqType ReqType = kNullOp; \ { __VA_ARGS__ } \ } break; \ case kWriteInplace: \ case kWriteTo: { \ const OpReqType ReqType = kWriteTo; \ { __VA_ARGS__ } \ } break; \ case kAddTo: { \ const OpReqType ReqType = kAddTo; \ { __VA_ARGS__ } \ } break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ { __VA_ARGS__ } \ } else if (NDim == 2) { \ const int ndim = 2; \ { __VA_ARGS__ } \ } else if (NDim == 3) { \ const int ndim = 3; \ { __VA_ARGS__ } \ } else if (NDim == 4) { \ const int ndim = 4; \ { __VA_ARGS__ } \ } else if (NDim == 5) { \ const int ndim = 5; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ { __VA_ARGS__ } \ } else if (NDim == 2) { \ const int ndim = 2; \ { __VA_ARGS__ } \ } else if (NDim == 3) { \ const int ndim = 3; \ { __VA_ARGS__ } \ } else if (NDim == 4) { \ const int ndim = 4; \ { __VA_ARGS__ } \ } else if (NDim == 5) { \ const int ndim = 5; \ { __VA_ARGS__ } \ } else if (NDim == 6) { \ const int ndim = 6; \ { __VA_ARGS__ } \ } else if (NDim == 7) { \ const int ndim = 7; \ { __VA_ARGS__ } \ } else if (NDim == 8) { \ const int ndim = 8; \ { __VA_ARGS__ } \ } else if (NDim == 9) { \ const int ndim = 9; \ { __VA_ARGS__ } \ } else if (NDim == 10) { \ const int ndim = 10; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: { \ typedef float DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat64: { \ typedef double DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat16: \ case mshadow::kBfloat16: { \ typedef mshadow::half::half_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: { \ typedef int32_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt64: { \ typedef int64_t DType; \ { __VA_ARGS__ } \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_BFLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: { \ typedef float DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat64: { \ typedef double DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat16: { \ typedef mshadow::half::half_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kBfloat16: \ LOG(FATAL) << "This operation does not " \ "support bfloat16"; \ break; \ case mshadow::kInt8: { \ typedef int32_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt32: { \ typedef int32_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt64: { \ typedef int64_t DType; \ { __VA_ARGS__ } \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: { \ typedef float DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat64: { \ typedef double DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: { \ typedef uint8_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt8: { \ typedef int8_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt32: { \ typedef int32_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt64: { \ typedef int64_t DType; \ { __VA_ARGS__ } \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...) 
\ switch (type) { \ case mshadow::kFloat32: { \ typedef float DType; \ typedef double AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat64: { \ typedef double DType; \ typedef double AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat16: { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kUint8: { \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } break; \ case mshadow::kInt8: { \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } break; \ case mshadow::kInt32: { \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } break; \ case mshadow::kInt64: { \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } break; \ case mshadow::kBool: { \ LOG(FATAL) << "This operation only support " \ "floating point types, not bool"; \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...) 
\ switch (type) { \ case mshadow::kFloat32: { \ typedef float DType; \ typedef double AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat64: { \ typedef double DType; \ typedef double AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat16: { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kUint8: { \ typedef uint8_t DType; \ typedef uint32_t AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt8: { \ typedef int8_t DType; \ typedef int32_t AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt32: { \ typedef int32_t DType; \ typedef int64_t AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt64: { \ typedef int64_t DType; \ typedef int64_t AType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kBool: { \ typedef bool DType; \ typedef int64_t AType; \ { __VA_ARGS__ } \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: { \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } break; \ case mshadow::kFloat64: { \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } break; \ case mshadow::kFloat16: { \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } break; \ case mshadow::kUint8: { \ typedef uint8_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt8: { \ typedef int8_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt32: { \ typedef int32_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt64: { \ typedef int64_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kBool: { \ typedef bool DType; \ { __VA_ARGS__ } \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: { \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } break; \ case mshadow::kFloat64: { \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } break; \ case mshadow::kFloat16: { \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } break; \ case mshadow::kUint8: { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } break; \ case mshadow::kInt8: { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } break; \ case mshadow::kInt32: { \ typedef int32_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kInt64: { \ typedef int64_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kBool: { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: { \ typedef float DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat64: { \ typedef double DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kFloat16: { \ typedef mshadow::half::half_t DType; \ { __VA_ARGS__ } \ } break; \ case mshadow::kUint8: { \ typedef uint8_t DType; \ { __VA_ARGS__ } \ } break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) #define MXNET_ADD_ALL_TYPES_EXT \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("int16", mshadow::kInt16) \ .add_enum("uint16", mshadow::kUint16) \ .add_enum("uint32", mshadow::kUint32) \ .add_enum("uint64", mshadow::kUint64) #define MXNET_ADD_ALL_TYPES_EXT_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", 
mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64) \
  .add_enum("bool", mshadow::kBool) \
  .add_enum("int16", mshadow::kInt16) \
  .add_enum("uint16", mshadow::kUint16) \
  .add_enum("uint32", mshadow::kUint32) \
  .add_enum("uint64", mshadow::kUint64)

/* \brief Compute flattened index given coordinates and shape. */
// Row-major linearization.  The (shape[i] > coord[i]) factor drops the
// coordinate on axes where it is out of range -- presumably to support
// broadcast axes of extent 1; TODO confirm against callers.
template <int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
#pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
// Inverse of ravel: peel off one dimension at a time, last axis first.
// NOTE(review): loop condition `i >= 0` assumes index_t is signed -- confirm.
template <int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
#pragma unroll
  for (index_t i = ndim - 1, j = idx; i >= 0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp * shape[i];
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
// Inner product of a coordinate with a stride vector -> flat offset.
template <int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
#pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
// Equivalent to dot(unravel(idx, shape), stride) without materializing the
// intermediate coordinate.
template <int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx,
                                    const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  index_t ret = 0;
#pragma unroll
  for (index_t i = ndim - 1, j = idx; i >= 0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp * shape[i]) * stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
// Row-major strides; axes of extent <= 1 get stride 0 so a broadcast axis
// never advances the offset.
template <int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
#pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates */
// Advance the multi-index by one position (with carry propagation); returns
// false once the coordinate wraps past the end of the shape.
template <int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim - 1];
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i - 1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index */
// Same as inc() above but also keeps a strided flat index in sync, avoiding
// a re-dot on every step.
template <int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord,
                         const Shape<ndim>& shape,
                         index_t* idx,
                         const Shape<ndim>& stride) {
  ++(*coord)[ndim - 1];
  *idx += stride[ndim - 1];
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i - 1];
    *idx = *idx + stride[i - 1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
// Variant that keeps two flat indices (e.g. input and output tensors with
// different strides) in sync with the same coordinate walk.
template <int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord,
                         const Shape<ndim>& shape,
                         index_t* idx1,
                         const Shape<ndim>& stride1,
                         index_t* idx2,
                         const Shape<ndim>& stride2) {
  ++(*coord)[ndim - 1];
  *idx1 += stride1[ndim - 1];
  *idx2 += stride2[ndim - 1];
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i - 1];
    *idx1 = *idx1 + stride1[i - 1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i - 1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
// Same dtype: raw copy; differing dtypes: element-wise cast via tcast.
// Sizes and device masks must already match.
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu>* s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
 * \brief Binary op backward gradient OP wrapper
 */
template <typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template <typename DType, typename... Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // Chain rule: incoming gradient times the op's local gradient.
    return DType(a * GRAD_OP::Map(args...));
  }
};

// Unary kernel wrapper that casts the input to the output type before
// applying OP, for mixed-precision unary ops.
template <typename OP, int req>
struct mixed_type_unary_op {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template <typename OType, typename IType>
  MSHADOW_XINLINE static void Map(index_t i, OType* out, const IType* in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i])));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
// Same behavior as backward_grad, but tagged `tunable` so the tuned
// Kernel<...>::Launch overloads pick the OMP-autotuned path.
template <typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 *  Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 *
 *  Each Map overload below forwards element i through OP::Map and writes the
 *  result with KERNEL_ASSIGN according to `req`. The SFINAE-constrained
 *  overloads near the end handle mixed input/output dtypes (bool outputs,
 *  integral inputs widened to half/float/double, etc.).
 */
template <typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* lhs, const DType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* in,
                                  const DType value_1,
                                  const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* input_1,
                                  const DType* input_2,
                                  const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* input_1,
                                  const DType* input_2,
                                  const DType* input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  /*! \brief input is a tensor and the output is a boolean tensor */
  template <typename DType,
            typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template <typename DType,
            typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* lhs, const DType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and two scalar value with a boolean output tensor */
  template <typename DType,
            typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is two tensors with different type and with a boolean output tensor */
  template <typename LType,
            typename RType,
            typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const LType* lhs, const RType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a half_t output tensor */
  template <typename DType,
            typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i,
                                  mshadow::half::half_t* out,
                                  const DType* lhs,
                                  const mshadow::half::half_t* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a float output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const float* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a double output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_same<DType, float>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double* out, const DType* lhs, const double* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a half_t output tensor */
  template <typename DType,
            typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i,
                                  mshadow::half::half_t* out,
                                  const DType* lhs,
                                  const mshadow::half::half_t value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief inputs are two tensors with a float output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const float value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief inputs are two tensors with a double output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_same<DType, float>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double* out, const DType* lhs, const double value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief inputs are two tensors with a float output tensor */
  template <typename DType,
            typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const DType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template <typename DType,
            typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; specialized per device below (cpu here, gpu under __CUDACC__).
template <typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template <typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template <typename... Args>
  inline static bool Launch(mshadow::Stream<cpu>*, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Not worth spawning a team; run serially.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template <typename... Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu>*, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // schedule(dynamic) load-balances iterations of uneven cost.
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template <typename PRIMITIVE_OP, typename DType, typename... Args>
  static void LaunchTuned(mshadow::Stream<cpu>*, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // tuned_op<...>::UseOMP decides, from measured tuning data, whether the
    // OMP fork/join overhead is worth it for this N.
    if (omp_threads < 2 ||
        !tuned_op<PRIMITIVE_OP, DType>::UseOMP(N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   *        operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  // Unlike Launch(), OP::Map here receives (start, count, args...) for a
  // contiguous range rather than a single index.
  template <typename... Args>
  inline static void LaunchEx(mshadow::Stream<cpu>* s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        // Last chunk may be shorter than `length`.
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  // Enabled only when OP itself derives from `tunable`.
  template <typename DType, typename T = OP, typename... Args>
  static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu>* s, const size_t N, DType* dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  // Enabled when OP is a wrapper (e.g. op_with_req) whose nested Operation is tunable.
  template <typename DType, typename T = OP, typename... Args>
  static MSHADOW_CINLINE
      typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
      Launch(mshadow::Stream<cpu>* s, const size_t N, DType* dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride device loop: each thread handles indices i, i+gridSize, ...
template <typename OP, typename... Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// As above, but calls the (start, count, ...) form of Map with count == 1.
template <typename OP, typename... Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template <typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template <typename... Args>
  inline static void Launch(mshadow::Stream<gpu>* s, int N, Args... args) {
    if (0 == N)
      return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  /*! \brief Launch GPU kernel via the (start, count, ...) Map form */
  template <typename... Args>
  inline static void LaunchEx(mshadow::Stream<gpu>* s, const int N, Args... args) {
    if (0 == N)
      return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template <int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one  = set_to_int<1>;

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template <bool val>
struct set_to_bool : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to true and false
 */
using set_true  = set_to_bool<true>;
using set_false = set_to_bool<false>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
/* vector.c */
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <HiParTI.h>
#include <stdlib.h>
#include <string.h>

/**
 * Initialize a new value vector
 *
 * @param vec a valid pointer to an uninitialized ptiValueVector variable,
 * @param len number of values to create
 * @param cap total number of values to reserve
 * @return 0 on success
 *
 * Vector is a type of one-dimensional array with dynamic length
 */
int ptiNewValueVector(ptiValueVector *vec, ptiNnzIndex len, ptiNnzIndex cap) {
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        /* Keep a minimum capacity so the 1.5x growth in Append always grows. */
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    /* calloc zero-initializes and checks cap * size for overflow,
       unlike the previous malloc + memset pair. */
    vec->data = calloc(cap, sizeof *vec->data);
    pti_CheckOSError(!vec->data, "ValVec New");
    return 0;
}

/**
 * Fill an existing dense value vector with a specified constant
 *
 * @param vec a valid pointer to an existing ptiValueVector variable,
 * @param val a given value constant
 * @return 0 on success
 */
int ptiConstantValueVector(ptiValueVector * const vec, ptiValue const val) {
    for(ptiNnzIndex i=0; i<vec->len; ++i)
        vec->data[i] = val;
    return 0;
}

/**
 * Fill an existing dense value vector with randomized values in [1, 10]
 *
 * @param vec a valid pointer to an existing ptiValueVector variable
 * @return 0 on success
 *
 * Note: rand() is not reseeded here, so the sequence is deterministic
 * unless the caller seeds it (see the commented srand below).
 */
int ptiRandomValueVector(ptiValueVector * const vec) {
    // srand(time(NULL));
    for(ptiNnzIndex i=0; i<vec->len; ++i)
        vec->data[i] = rand() % 10 + 1;
    return 0;
}

/**
 * Copy a value vector to an uninitialized value vector
 *
 * @param dest a pointer to an uninitialized value vector
 * @param src a pointer to an existing valid value vector
 * @param nt number of OpenMP threads to use (ignored without HIPARTI_USE_OPENMP)
 * @return 0 on success
 *
 * The contents of `src` will be copied to `dest`.
 */
int ptiCopyValueVector(ptiValueVector *dest, const ptiValueVector *src, int const nt) {
    int result = ptiNewValueVector(dest, src->len, src->len);
    pti_CheckError(result, "ValVec Copy", NULL);
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for num_threads(nt)
    for (ptiNnzIndex i=0; i<src->len; ++i) {
        dest->data[i] = src->data[i];
    }
#else
    memcpy(dest->data, src->data, src->len * sizeof *src->data);
#endif
    return 0;
}

/**
 * Add a value to the end of a value vector
 *
 * @param vec a pointer to a valid value vector
 * @param value the value to be appended
 * @return 0 on success
 *
 * The length of the value vector will be changed to contain the new value.
 */
int ptiAppendValueVector(ptiValueVector *vec, ptiValue const value) {
    if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
        /* Grow by 1.5x to amortize reallocation cost. */
        ptiNnzIndex newcap = vec->cap + vec->cap/2;
#else
        /* Exact growth makes out-of-bounds writes visible to memory checkers. */
        ptiNnzIndex newcap = vec->len+1;
#endif
        ptiValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        pti_CheckOSError(!newdata, "ValVec Append");
        vec->cap = newcap;
        vec->data = newdata;
    }
    vec->data[vec->len] = value;
    ++vec->len;
    return 0;
}

/**
 * Append one value vector to the end of another
 *
 * @param vec a pointer to a valid value vector
 * @param append_vec a pointer to another value vector, containing the values to be appended
 *
 * The values from `append_vec` will be appended to `vec`.
*/ int ptiAppendValueVectorWithVector(ptiValueVector *vec, const ptiValueVector *append_vec) { ptiNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { ptiNnzIndex newcap = vec->cap + append_vec->cap; ptiValue *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "ValVec Append ValVec"); vec->cap = newcap; vec->data = newdata; } for(ptiNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } /** * Resize a value vector * * @param vec the value vector to resize * @param size the new size of the value vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int ptiResizeValueVector(ptiValueVector *vec, ptiNnzIndex const size) { ptiNnzIndex newcap = size < 2 ? 2 : size; if(newcap != vec->cap) { ptiValue *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "ValVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } /** * Release the memory buffer a value vector is holding * * @param vec a pointer to a valid value vector * */ void ptiFreeValueVector(ptiValueVector *vec) { vec->len = 0; vec->cap = 0; free(vec->data); } /* * Initialize a new ptiIndex vector * * @param vec a valid pointer to an uninitialized ptiIndex variable, * @param len number of values to create * @param cap total number of values to reserve * * Vector is a type of one-dimentional array with dynamic length */ int ptiNewIndexVector(ptiIndexVector *vec, ptiNnzIndex len, ptiNnzIndex cap) { if(cap < len) { cap = len; } if(cap < 2) { cap = 2; } vec->len = len; vec->cap = cap; vec->data = malloc(cap * sizeof *vec->data); pti_CheckOSError(!vec->data, "IdxVec New"); memset(vec->data, 0, cap * sizeof *vec->data); return 0; } /** * Fill an existed dense 
index vector with a specified constant * * @param vec a valid pointer to an existed ptiIndexVector variable, * @param num a given value constant * * Vector is a type of one-dimentional array with dynamic length */ int ptiConstantIndexVector(ptiIndexVector * const vec, ptiIndex const num) { for(ptiNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy an index vector to an uninitialized index vector * * @param dest a pointer to an uninitialized index vector * @param src a pointer to an existing valid index vector * * The contents of `src` will be copied to `dest`. */ int ptiCopyIndexVector(ptiIndexVector *dest, const ptiIndexVector *src, int const nt) { int result = ptiNewIndexVector(dest, src->len, src->len); pti_CheckError(result, "IdxVec Copy", NULL); #ifdef HIPARTI_USE_OPENMP #pragma omp parallel for num_threads(nt) for (ptiNnzIndex i=0; i<src->len; ++i) { dest->data[i] = src->data[i]; } #else memcpy(dest->data, src->data, src->len * sizeof *src->data); #endif return 0; } /** * Add a value to the end of a ptiIndexVector * * @param vec a pointer to a valid index vector * @param value the value to be appended * * The length of the size vector will be changed to contain the new value. */ int ptiAppendIndexVector(ptiIndexVector *vec, ptiIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE ptiNnzIndex newcap = vec->cap + vec->cap/2; #else ptiNnzIndex newcap = vec->len+1; #endif ptiIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "IdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of an index vector * * @param vec a pointer to a valid index vector * @param append_vec a pointer to another index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/ int ptiAppendIndexVectorWithVector(ptiIndexVector *vec, const ptiIndexVector *append_vec) { ptiNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { ptiNnzIndex newcap = vec->cap + append_vec->cap; ptiIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "IdxVec Append IdxVec"); vec->cap = newcap; vec->data = newdata; } for(ptiNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } /** * Resize an index vector * * @param vec the index vector to resize * @param size the new size of the index vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int ptiResizeIndexVector(ptiIndexVector *vec, ptiNnzIndex const size) { ptiNnzIndex newcap = size < 2 ? 2 : size; if(newcap != vec->cap) { ptiIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "IdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } /** * Release the memory buffer a ptiIndexVector is holding * * @param vec a pointer to a valid size vector * */ void ptiFreeIndexVector(ptiIndexVector *vec) { free(vec->data); vec->len = 0; vec->cap = 0; } /* * Initialize a new ptiElementIndexVector vector * * @param vec a valid pointer to an uninitialized ptiElementIndex variable, * @param len number of values to create * @param cap total number of values to reserve * * Vector is a type of one-dimentional array with dynamic length */ int ptiNewElementIndexVector(ptiElementIndexVector *vec, ptiNnzIndex len, ptiNnzIndex cap) { if(cap < len) { cap = len; } if(cap < 2) { cap = 2; } vec->len = len; vec->cap = cap; vec->data = malloc(cap * sizeof *vec->data); pti_CheckOSError(!vec->data, "EleIdxVec New"); memset(vec->data, 0, cap * sizeof *vec->data); 
return 0; } /** * Fill an existed dense element index vector with a specified constant * * @param vec a valid pointer to an existed ptiElementIndexVector variable, * @param num a given value constant * * Vector is a type of one-dimentional array with dynamic length */ int ptiConstantElementIndexVector(ptiElementIndexVector * const vec, ptiElementIndex const num) { for(ptiNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy an element index vector to an uninitialized element index vector * * @param dest a pointer to an uninitialized element index vector * @param src a pointer to an existing valid element index vector * * The contents of `src` will be copied to `dest`. */ int ptiCopyElementIndexVector(ptiElementIndexVector *dest, const ptiElementIndexVector *src) { int result = ptiNewElementIndexVector(dest, src->len, src->len); pti_CheckError(result, "EleIdxVec Copy", NULL); memcpy(dest->data, src->data, src->len * sizeof *src->data); return 0; } /** * Add a value to the end of a ptiElementIndexVector * * @param vec a pointer to a valid element index vector * @param value the value to be appended * * The length of the element index vector will be changed to contain the new value. */ int ptiAppendElementIndexVector(ptiElementIndexVector *vec, ptiElementIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE ptiNnzIndex newcap = vec->cap + vec->cap/2; #else ptiNnzIndex newcap = vec->len+1; #endif ptiElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "EleIdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of an element index vector * * @param vec a pointer to a valid element index vector * @param append_vec a pointer to another element index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/ int ptiAppendElementIndexVectorWithVector(ptiElementIndexVector *vec, const ptiElementIndexVector *append_vec) { ptiNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { ptiNnzIndex newcap = vec->cap + append_vec->cap; ptiElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "EleIdxVec Append EleIdxVec"); vec->cap = newcap; vec->data = newdata; } for(ptiNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } /** * Resize a element index vector * * @param vec the element index vector to resize * @param size the new size of the element index vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int ptiResizeElementIndexVector(ptiElementIndexVector *vec, ptiNnzIndex const size) { ptiNnzIndex newcap = size < 2 ? 
2 : size; if(newcap != vec->cap) { ptiElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "EleIdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } /** * Release the memory buffer a ptiElementIndexVector is holding * * @param vec a pointer to a valid size vector * */ void ptiFreeElementIndexVector(ptiElementIndexVector *vec) { free(vec->data); vec->len = 0; vec->cap = 0; } /* * Initialize a new ptiBlockIndexVector vector * * @param vec a valid pointer to an uninitialized ptiBlockIndex variable, * @param len number of values to create * @param cap total number of values to reserve * * Vector is a type of one-dimentional array with dynamic length */ int ptiNewBlockIndexVector(ptiBlockIndexVector *vec, ptiNnzIndex len, ptiNnzIndex cap) { if(cap < len) { cap = len; } if(cap < 2) { cap = 2; } vec->len = len; vec->cap = cap; vec->data = malloc(cap * sizeof *vec->data); pti_CheckOSError(!vec->data, "BlkIdxVec New"); memset(vec->data, 0, cap * sizeof *vec->data); return 0; } /** * Fill an existed dense element index vector with a specified constant * * @param vec a valid pointer to an existed ptiBlockIndexVector variable, * @param num a given value constant * * Vector is a type of one-dimentional array with dynamic length */ int ptiConstantBlockIndexVector(ptiBlockIndexVector * const vec, ptiBlockIndex const num) { for(ptiNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy a block index vector to an uninitialized block index vector * * @param dest a pointer to an uninitialized block index vector * @param src a pointer to an existing valid block index vector * * The contents of `src` will be copied to `dest`. 
*/ int ptiCopyBlockIndexVector(ptiBlockIndexVector *dest, const ptiBlockIndexVector *src) { int result = ptiNewBlockIndexVector(dest, src->len, src->len); pti_CheckError(result, "BlkIdxVec Copy", NULL); memcpy(dest->data, src->data, src->len * sizeof *src->data); return 0; } /** * Add a value to the end of a ptiBlockIndexVector * * @param vec a pointer to a valid block index vector * @param value the value to be appended * * The length of the block index vector will be changed to contain the new value. */ int ptiAppendBlockIndexVector(ptiBlockIndexVector *vec, ptiBlockIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE ptiNnzIndex newcap = vec->cap + vec->cap/2; #else ptiNnzIndex newcap = vec->len+1; #endif ptiBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); pti_CheckOSError(!newdata, "BlkIdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of a block index vector * * @param vec a pointer to a valid block index vector * @param append_vec a pointer to another block index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/
int ptiAppendBlockIndexVectorWithVector(ptiBlockIndexVector *vec, const ptiBlockIndexVector *append_vec) {
    ptiNnzIndex newlen = vec->len + append_vec->len;
    if(vec->cap <= newlen) {
        /* cap + append_vec->cap >= len + append_vec->len, so one grow suffices. */
        ptiNnzIndex newcap = vec->cap + append_vec->cap;
        ptiBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        pti_CheckOSError(!newdata, "BlkIdxVec Append BlkIdxVec");
        vec->cap = newcap;
        vec->data = newdata;
    }
    for(ptiNnzIndex i=0; i<append_vec->len; ++i) {
        vec->data[vec->len + i] = append_vec->data[i];
    }
    vec->len = newlen;
    return 0;
}

/**
 * Resize a block index vector.
 *
 * @param vec  the block index vector to resize
 * @param size the new size of the block index vector
 *
 * Growing leaves the new trailing elements with undefined values; shrinking
 * truncates values at the end.
 */
int ptiResizeBlockIndexVector(ptiBlockIndexVector *vec, ptiNnzIndex const size) {
    ptiNnzIndex newcap = size < 2 ? 2 : size;  /* keep the minimum capacity invariant */
    if(newcap != vec->cap) {
        ptiBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        pti_CheckOSError(!newdata, "BlkIdxVec Resize");
        vec->len = size;
        vec->cap = newcap;
        vec->data = newdata;
    } else {
        vec->len = size;
    }
    return 0;
}

/**
 * Release the memory buffer a ptiBlockIndexVector is holding.
 *
 * @param vec a pointer to a valid block index vector
 *
 * Note: data is freed but not reset to NULL; re-initialize before reuse.
 */
void ptiFreeBlockIndexVector(ptiBlockIndexVector *vec) {
    free(vec->data);
    vec->len = 0;
    vec->cap = 0;
}

/**
 * Initialize a new ptiNnzIndexVector.
 *
 * @param vec a valid pointer to an uninitialized ptiNnzIndexVector
 * @param len number of values to create
 * @param cap total number of values to reserve (raised to at least len,
 *            minimum 2)
 *
 * The buffer is zero-filled.
 */
int ptiNewNnzIndexVector(ptiNnzIndexVector *vec, ptiNnzIndex len, ptiNnzIndex cap) {
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    vec->data = malloc(cap * sizeof *vec->data);
    pti_CheckOSError(!vec->data, "NnzIdxVec New");
    memset(vec->data, 0, cap * sizeof *vec->data);
    return 0;
}

/**
 * Fill an existing dense long nnz index vector with a specified constant.
 *
 * @param vec a valid pointer to an existing ptiNnzIndexVector
 * @param num the constant to store in every element
 */
int ptiConstantNnzIndexVector(ptiNnzIndexVector * const vec, ptiNnzIndex const num) {
    for(ptiNnzIndex i=0; i<vec->len; ++i)
        vec->data[i] = num;
    return 0;
}

/**
 * Copy a long nnz index vector to an uninitialized long nnz index vector.
 *
 * @param dest a pointer to an uninitialized long nnz index vector
 * @param src  a pointer to an existing valid long nnz index vector
 */
int ptiCopyNnzIndexVector(ptiNnzIndexVector *dest, const ptiNnzIndexVector *src) {
    int result = ptiNewNnzIndexVector(dest, src->len, src->len);
    pti_CheckError(result, "NnzIdxVec Copy", NULL);
    memcpy(dest->data, src->data, src->len * sizeof *src->data);
    return 0;
}

/**
 * Append one value to the end of a ptiNnzIndexVector.
 *
 * @param vec   a pointer to a valid long nnz index vector
 * @param value the value to be appended
 */
int ptiAppendNnzIndexVector(ptiNnzIndexVector *vec, ptiNnzIndex const value) {
    if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
        /* Geometric 1.5x growth in normal builds. */
        ptiNnzIndex newcap = vec->cap + vec->cap/2;
#else
        /* Exact-fit growth so memory checkers flag off-by-one writes. */
        ptiNnzIndex newcap = vec->len+1;
#endif
        ptiNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        pti_CheckOSError(!newdata, "NnzIdxVec Append");
        vec->cap = newcap;
        vec->data = newdata;
    }
    vec->data[vec->len] = value;
    ++vec->len;
    return 0;
}

/**
 * Append the contents of one long nnz index vector to another.
 *
 * @param vec        a pointer to a valid long nnz index vector
 * @param append_vec a pointer to another long nnz index vector, containing
 *                   the values to be appended
 */
int ptiAppendNnzIndexVectorWithVector(ptiNnzIndexVector *vec, const ptiNnzIndexVector *append_vec) {
    ptiNnzIndex newlen = vec->len + append_vec->len;
    if(vec->cap <= newlen) {
        ptiNnzIndex newcap = vec->cap + append_vec->cap;
        ptiNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        pti_CheckOSError(!newdata, "NnzIdxVec Append NnzIdxVec");
        vec->cap = newcap;
        vec->data = newdata;
    }
    for(ptiNnzIndex i=0; i<append_vec->len; ++i) {
        vec->data[vec->len + i] = append_vec->data[i];
    }
    vec->len = newlen;
    return 0;
}

/**
 * Resize a long nnz index vector.
 *
 * @param vec  the long nnz index vector to resize
 * @param size the new size of the long nnz index vector
 *
 * Growing leaves new trailing elements undefined; shrinking truncates.
 */
int ptiResizeNnzIndexVector(ptiNnzIndexVector *vec, ptiNnzIndex const size) {
    ptiNnzIndex newcap = size < 2 ? 2 : size;
    if(newcap != vec->cap) {
        ptiNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        pti_CheckOSError(!newdata, "NnzIdxVec Resize");
        vec->len = size;
        vec->cap = newcap;
        vec->data = newdata;
    } else {
        vec->len = size;
    }
    return 0;
}

/**
 * Release the memory buffer a ptiNnzIndexVector is holding.
 *
 * @param vec a pointer to a valid long nnz index vector
 */
void ptiFreeNnzIndexVector(ptiNnzIndexVector *vec) {
    free(vec->data);
    vec->len = 0;
    vec->cap = 0;
}
/* ======================= effect.c (MagickCore) ======================= */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring 
less
%  intensely near image edges and more intensely far from edges.  We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* A (near-)zero sigma is a no-op: return the clone unchanged. */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even offsets w are populated; j is forced even below so kernel[j]
    is always one of these.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (w=0; w < (ssize_t) width; w+=2)
  {
    ssize_t
      j,
      k,
      u,
      v;

    kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-w),(width-w)*sizeof(**kernel)));
    if (kernel[w] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-w-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[w][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[w][k];
        k++;
      }
    }
    /* Fold any normalization residue into the center tap so the kernel
       sums to exactly 1.0. */
    kernel[w][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[w][(k-1)/2]=1.0;
  }
  if (w < (ssize_t) width)
    {
      /* Partial kernel allocation failed: unwind everything allocated so far. */
      for (w-=2; w >= 0; w-=2)
        kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const Quantum
      *magick_restrict r;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        i;

      ssize_t
        center,
        j;

      /* Map edge intensity to a kernel index: strong edges pick a small
         kernel (little blur), flat regions pick a large one. */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const double
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (w=0; w < (ssize_t) width; w+=2)
    kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges.
We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* A (near-)zero sigma is a no-op: return the clone unchanged. */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Taps are the negated Gaussian (a sharpening/Laplacian-like kernel);
    the center tap is set to -2x the negative sum so the kernel sums to
    a positive weight.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (w=0; w < (ssize_t) width; w+=2)
  {
    ssize_t
      j,
      k,
      u,
      v;

    kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-w),(width-w)*sizeof(**kernel)));
    if (kernel[w] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-w-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[w][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[w][k];
        k++;
      }
    }
    kernel[w][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[w][(k-1)/2]=1.0;
  }
  if (w < (ssize_t) width)
    {
      /* Partial kernel allocation failed: unwind everything allocated so far. */
      for (w-=2; w >= 0; w-=2)
        kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    const Quantum
      *magick_restrict r;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        i;

      ssize_t
        center,
        j;

      /* Map edge intensity to a kernel index: strong edges pick a small
         kernel (more sharpening), flat regions pick a large one. */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        const double
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (w=0; w < (ssize_t) width; w+=2)
    kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).  For reasonable results,
%  the radius should be larger than sigma.
Use a radius of 0 and BlurImage()
%  selects a suitable radius for you.
%
%  The format of the BlurImage method is:
%
%      Image *BlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when available; fall through to the
     CPU convolution when it declines (returns NULL). */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /* Separable Gaussian: one 1-D blur kernel plus the same kernel rotated 90
     degrees, applied via ConvolveImage(). */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l a t e r a l B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
%  smoothing filter for images.
It replaces the intensity of each pixel with
%  a weighted average of intensity values from nearby pixels.  This weight is
%  based on a Gaussian distribution.  The weights depend not only on Euclidean
%  distance of pixels, but also on the radiometric differences (e.g., range
%  differences, such as color intensity, depth distance, etc.).  This preserves
%  sharp edges.
%
%  The format of the BilateralBlurImage method is:
%
%      Image *BilateralBlurImage(const Image *image,const size_t width,
%        const size_t height,const double intensity_sigma,
%        const double spatial_sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the neighborhood in pixels.
%
%    o height: the height of the neighborhood in pixels.
%
%    o intensity_sigma: sigma in the intensity space.  A larger value means
%      that farther colors within the pixel neighborhood (see spatial_sigma)
%      will be mixed together, resulting in larger areas of semi-equal color.
%
%    o spatial_sigma: sigma in the coordinate space.  A larger value means that
%      farther pixels influence each other as long as their colors are close
%      enough (see intensity_sigma).  When the neighborhood diameter is greater
%      than zero, it specifies the neighborhood size regardless of
%      spatial_sigma.  Otherwise, the neighborhood diameter is proportional to
%      spatial_sigma.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double BlurDistance(const ssize_t x,const ssize_t y, const ssize_t u,const ssize_t v) { return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v))); } static inline double BlurGaussian(const double x,const double sigma) { return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))* PerceptibleReciprocal(Magick2PI*sigma*sigma)); } static double **DestroyBilateralTLS(const ssize_t number_threads, double **weights) { ssize_t i; assert(weights != (double **) NULL); for (i=0; i <= (ssize_t) number_threads; i++) if (weights[i] != (double *) NULL) weights[i]=(double *) RelinquishMagickMemory(weights[i]); weights=(double **) RelinquishMagickMemory(weights); return(weights); } static double **AcquireBilateralTLS(const size_t number_threads, const size_t width,const size_t height) { double **weights; ssize_t i; weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights)); if (weights == (double **) NULL) return((double **) NULL); (void) memset(weights,0,number_threads*sizeof(*weights)); for (i=0; i <= (ssize_t) number_threads; i++) { weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights)); if (weights[i] == (double *) NULL) return(DestroyBilateralTLS(number_threads,weights)); } return(weights); } MagickExport Image *BilateralBlurImage(const Image *image,const size_t width, const size_t height,const double intensity_sigma,const double spatial_sigma, ExceptionInfo *exception) { #define MaxIntensity (255) #define BilateralBlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double intensity_gaussian[2*(MaxIntensity+1)], *spatial_gaussian, **weights; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo mid; ssize_t number_threads, w, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) 
NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } number_threads=(size_t) GetMagickResourceLimit(ThreadResource); weights=AcquireBilateralTLS(number_threads,MagickMax(width,1), MagickMax(height,1)); if (weights == (double **) NULL) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (w=(-MaxIntensity); w < MaxIntensity; w++) intensity_gaussian[w+MaxIntensity]=BlurGaussian((double) w,intensity_sigma); spatial_gaussian=weights[number_threads]; { ssize_t n, v; n=0; mid.x=(ssize_t) (MagickMax(width,1)/2L); mid.y=(ssize_t) (MagickMax(height,1)/2L); for (v=0; v < (ssize_t) MagickMax(height,1); v++) { ssize_t u; for (u=0; u < (ssize_t) MagickMax(width,1); u++) spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y), spatial_sigma); } } /* Bilateral blur image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { double gamma, pixel; const Quantum *magick_restrict p, *magick_restrict r; ssize_t i, u; ssize_t n, v; /* Tonal weighting preserves edges while smoothing in the flat regions. 
*/ p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,MagickMax(width,1), MagickMax(height,1),exception); if (p == (const Quantum *) NULL) break; p+=(ssize_t) GetPixelChannels(image)*MagickMax(width,1)*mid.y+ GetPixelChannels(image)*mid.x; n=0; for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { double intensity; r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) MagickMax(width,1)* (mid.y-v)+GetPixelChannels(image)*(mid.x-u); intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))- (double) ScaleQuantumToChar(GetPixelIntensity(image,p)); if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity)) weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]* spatial_gaussian[n]; else weights[id][n]=BlurGaussian(intensity,intensity_sigma)* BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma); n++; } } for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { PixelChannel channel; PixelTrait blur_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } pixel=0.0; gamma=0.0; n=0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { r=p+(ssize_t) GetPixelChannels(image)*MagickMax(width,1)* (mid.y-v)+GetPixelChannels(image)*(mid.x-u); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { double alpha, beta; r=p+(ssize_t) GetPixelChannels(image)*MagickMax(width,1)*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]*alpha*beta; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); } q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BilateralBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); weights=DestroyBilateralTLS(number_threads,weights); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  Image
    *convolve_image;

  /*
    Prefer the OpenCL-accelerated convolution when the runtime provides it.
  */
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  /*
    Convolution is a single-iteration morphology operation.
  */
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one raising/lowering pass of the complementary hulling
  operator used by DespeckleImage().  f and g are (columns+2)x(rows+2)
  single-channel planes with a one-pixel border; (x_offset,y_offset) selects
  the neighbor direction and polarity selects whether pixel values are raised
  (polarity > 0) or lowered.  Results are folded back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    Skip the top padding row; r points at the selected neighbor of p.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    ssize_t
      i,
      x;

    /* index of the first interior pixel of row y in the padded plane */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* raise a pixel that is at least 2 (char-scaled) below its neighbor */
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* lower a pixel that is at least 2 (char-scaled) above its neighbor */
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second sweep: compare each just-written pixel in g against both the
    forward neighbor (r) and the mirrored neighbor (s), writing back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const
Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  ssize_t
    i;

  size_t
    length;

  /*
    The four hull directions; each is applied with both signs and both
    polarities below.
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffers: two single-channel planes with a one-pixel border.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy channel i into the bordered scratch plane; j tracks the padded
      index (one border column on each side, one border row top and bottom).
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      Raise, then complementarily lower, pixel values along each direction.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the filtered plane back to channel i of the despeckled image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is per channel, not per row, for this filter */
        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Build a width x width edge-detection kernel: every tap is -1 except the
    center, which is set so the kernel sums to zero.  The structure is
    cleared first, then the fields the convolution needs are re-established.
  */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  /* after the loop i == width*height, so i/2 is the center tap */
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill a Gaussian-weighted kernel whose sign flips across the diagonal
    (negative for u<0 or v<0); taps off the k-th diagonal are zeroed so only
    a single diagonal line of weights remains, producing the relief effect.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize the kernel so its taps sum to 1 (guarding against a zero sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Describe the Gaussian kernel as a geometry string and let the kernel
    machinery build it; the blur itself is a plain convolution.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  kernel_info=AcquireKernelInfo(kernel_geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma() returns the Rec.709 luma of a pixel given as an array of
  double channel values indexed through the image's channel map.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* quadrant side length; note the truncating cast of radius */
  width=(size_t) radius+1;
  /*
    Operate on a pre-blurred copy of the image.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter: for each pixel pick the quadrant
    of its neighborhood with minimal luma variance and sample its center.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width x width quadrants around (x,y) and remember
        the one with the smallest luma variance.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* per-channel mean over the quadrant */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* luma variance of the quadrant about the mean pixel */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* a quadrant read failed above */
          status=MagickFalse;
          break;
        }
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanline,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanline_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    Blur half-width in pixels: radius is a percentage, 0.2% of the largest
    dimension per percent (truncated to an integer).  Each scanline is padded
    by width on both ends; one scanline buffer is allocated per thread.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer: vertically blurred luma, rows x padded
    columns.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: triangle-weighted blur of the luma down each column into
    interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* fetch the column with width rows of padding above and below */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float) GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* triangle filter: weights ramp 1..width then width-1..1 */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror edge columns into the horizontal padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage along each row, then sharpen the
    original pixels against the blurred mask.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        /* same triangle filter as the vertical pass */
        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /*
          Apply the unsharp-style gain to each updatable RGB channel and
          write the result.
        */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelRed(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelGreen(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelBlue(image,p)*mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%    Image *MotionBlurImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view, *motion_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; OffsetInfo *offset; PointInfo point; size_t width; ssize_t w, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (w=0; w < (ssize_t) width; w++) { offset[w].x=CastDoubleToLong(ceil((double) (w*point.y)/ hypot(point.x,point.y)-0.5)); offset[w].y=CastDoubleToLong(ceil((double) (w*point.x)/ hypot(point.x,point.y)-0.5)); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; const Quantum *magick_restrict r; MagickRealType *magick_restrict k; ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) 
|| (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation 
applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } 
case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { RectangleInfo raise; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; raise.width=(size_t) (2*i+2); raise.height=(size_t) (2*i+2); raise.x=(i-1)/2; raise.y=(i-1)/2; (void) RaiseImage(preview_image,&raise,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double) raise.height,(double) raise.x,(double) raise.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, 
threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) 
RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; preview_image->alpha_trait=UndefinedPixelTrait; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. 
*/ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; size_t n; ssize_t w, y; /* Allocate blur image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { if (cos_theta != (double *) NULL) cos_theta=(double *) RelinquishMagickMemory(cos_theta); if (sin_theta != (double *) NULL) sin_theta=(double *) RelinquishMagickMemory(sin_theta); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (w=0; w < (ssize_t) n; w++) { cos_theta[w]=cos((double) (theta*w-offset)); sin_theta[w]=sin((double) (theta*w-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; const Quantum *magick_restrict r; ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 
1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. 
%
% The format of the SelectiveBlurImage method is:
%
%     Image *SelectiveBlurImage(const Image *image,const double radius,
%       const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  size_t
    width;

  ssize_t
    center,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width 2-D Gaussian kernel, filled row-major from the
    top-left corner (-j,-j) to the bottom-right (+j,+j).
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  {
    ssize_t
      i,
      j,
      v;

    j=(ssize_t) (width-1)/2;
    i=0;
    for (v=(-j); v <= j; v++)
    {
      ssize_t
        u;

      for (u=(-j); u <= j; u++)
        kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
    }
  }
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel to the log, one formatted row per line.
      */
      char
        format[MagickPathExtent],
        *message;

      const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    A grayscale copy supplies the intensity used for the contrast test.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /* Offset (in Quantum units) of the center pixel inside the padded
     (columns+width) x width neighborhood fetched per row. */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict l,
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Unblended channel: accumulate kernel-weighted neighbors whose
              luminance contrast against the center is below the threshold;
              gamma tracks the included weight for normalization.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              /* advance to the start of the next neighborhood row in the
                 padded region */
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* no neighbor qualified: pass the center pixel through */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Blended channel: same selection, but alpha-weighted.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
%     Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%       const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation: Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* NOTE(review): despite its name, linear_image is a plain clone here — no
     colorspace transform is applied in this code; confirm that is intended. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* fetch a 3-row window (previous, current, next) padded by one column on
       each side so every pixel has a full 3x3 neighborhood */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
      */
      /* pre/center/post point at the pixel's position in the previous,
         current and next rows of the padded window */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the vertical component only */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray shading: write the shade value itself */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
%     Image *SharpenImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a (width x width) sharpening kernel: negated Gaussian weights with
    a positive center element, then convolve.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /* negated 2D Gaussian weight at offset (u,v) */
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* center element: make the kernel sum positive (i == width*height here) */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Normalize the kernel so its weights sum to one.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%    Image *SpreadImage(const Image *image,
%      const PixelInterpolateMethod method,const double radius,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: intepolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoTLS();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* a reproducible secret key disables threading so results are deterministic */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        Interpolate the output pixel from a pseudo-random location within a
        width-sized neighborhood centered on (x,y).
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoTLS(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more
image channels.  We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma).  For reasonable results, radius should be larger than sigma.  Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%    Image *UnsharpMaskImage(const Image *image,const double radius,
%      const double sigma,const double amount,const double threshold,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* consistency fix: every sibling in this file validates the exception
     signature as well */
  assert(exception->signature == MagickCoreSignature);
/*
  This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  /*
    Start from a Gaussian-blurred clone; the sharpened result is built by
    adding back a gain-scaled difference between original and blur.
  */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            /* copy-only channel: take the source value unmodified */
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* difference between the original and the blurred channel */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];  /* below threshold: keep the original */
        else
          pixel=(double) p[i]+gain*pixel;  /* add back the scaled difference */
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
nvidia_flex_pre_utilities.h
#ifndef NVIDIA_FLEX_PRE_UTILITES_H #define NVIDIA_FLEX_PRE_UTILITES_H /* System includes */ #include <limits> #include <iostream> #include <iomanip> #include <fstream> #include <vector> #include <stdlib.h> #include <time.h> #include <string> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif /* Project includes */ #include "includes/define.h" #include "utilities/timer.h" #include "includes/variables.h" #include "utilities/openmp_utils.h" //#include "cluster_information.h" //#include "custom_elements/spheric_continuum_particle.h" namespace Kratos { class NvidiaFlexPreUtilities { public: typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::NodesContainerType::ContainerType NodesContainerType; typedef WeakPointerVector<Element> ParticleWeakVectorType; typedef WeakPointerVector<Element>::iterator ParticleWeakIteratorType; KRATOS_CLASS_POINTER_DEFINITION(NvidiaFlexPreUtilities); /// Default constructor NvidiaFlexPreUtilities() {} NvidiaFlexPreUtilities(ModelPart& rModelPart) { //mInitialCenterOfMassAndMass = CalculateCenterOfMass(rModelPart); //mInitialMass = CalculateTotalMass(rModelPart); } /// Destructor virtual ~NvidiaFlexPreUtilities() {} void RemoveSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart) { ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements(); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++) { ElementsArrayType::iterator it = pElements.ptr_begin() + k; Element* p_element = &(*it); SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(p_element); if (p_sphere->mNeighbourRigidFaces.size()) { p_sphere->Set(TO_ERASE); p_sphere->GetGeometry()[0].Set(TO_ERASE); } } } /// Turn back information as a stemplate<class T, std::size_t dim> tring. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { } /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const { } vector<unsigned int>& GetElementPartition() {return (mElementPartition);}; protected: vector<unsigned int> mElementPartition; private: array_1d<double, 3> mInitialCenterOfMassAndMass; double mInitialMass; /// Assignment operator NvidiaFlexPreUtilities & operator=(NvidiaFlexPreUtilities const& rOther); }; // Class NvidiaFlexPreUtilities } // namespace Kratos #endif // NVIDIA_FLEX_PRE_UTILITES_H
parallel_blocked_ldlt.h
// // Created by kazem on 11/30/18. // #ifndef PROJECT_PARALLEL_BLOCKED_LDLT_H #define PROJECT_PARALLEL_BLOCKED_LDLT_H #include <stdlib.h> #include <cmath> #include <cassert> #include "mkl.h" #include "Reach.h" #include "Sym_BLAS.h" #undef TIMING #undef TLAST #undef TIMING1 namespace nasoq { bool ldl_left_sn_parallel_01(int n, int *c, int *r, double *values, size_t *lC, int *lR, size_t *Li_ptr, double *lValues, double *D, int *blockSet, int supNo, double *timing, #ifndef PRUNE int *aTree, int *cT, int *rT, int *col2Sup, #else int *prunePtr, int *pruneSet, #endif int nLevels, int *levelPtr, int *levelSet, int nPar, int *parPtr, int *partition, int chunk, int threads, int super_max, int col_max, int &nbpivot, double threshold = 1e-13) { /* * For timing using BLAS */ const int incx = 1; int top = 0; int *xi = new int[2 * supNo](); //int super_max = 64; //tunig parameter for the max size of supernodes TODO: find it in analysis //int col_max = n; int *map; //= new int[n](); double *contribs; //= new double[super_max*col_max](); double *trn_diag; //= new double[super_max*col_max](); int info; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int *ipiv = new int[n](); std::chrono::time_point<std::chrono::system_clock> start, end, startin, endin; std::chrono::duration<double> elapsed_seconds; double duration4 = 0, duration3 = 0, duration2 = 0, duration1 = 0; #ifdef TIMING start = std::chrono::system_clock::now(); #endif for (int i1 = 0; i1 < nLevels - 1; ++i1) { #pragma omp parallel //shared(lValues)//private(map, contribs) { #pragma omp for schedule(dynamic) private(map, trn_diag, contribs, xi, startin, endin, duration2) for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) { #ifdef BLASTIMING int threadID = omp_get_thread_num(); std::chrono::time_point<std::chrono::system_clock> startBlas, endBlas; #endif map = new int[n](); contribs = new 
double[super_max * col_max](); xi = new int[2 * supNo](); trn_diag = new double[super_max * col_max](); //int pls = levelSet[j1]; #ifdef TIMING1 startin = std::chrono::system_clock::now(); #endif //#pragma omp parallel for schedule(static,chunk)private(thth) for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) { int s = partition[k1] + 1; int curCol = s != 0 ? blockSet[s - 1] : 0; int nxtCol = blockSet[s]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode for (int i = Li_ptr[curCol], cnt = 0; i < Li_ptr[nxtCol]; ++i) { map[lR[i]] = cnt++;//mapping L rows position to actual row idx } //copy the columns from A to L for (int i = curCol; i < nxtCol; ++i) {//Copy A to L int pad = i - curCol; for (int j = c[i]; j < c[i + 1]; ++j) { // if(r[j]>=i)//does not need to save upper part. lValues[lC[i] + map[r[j]]] = values[j]; // else // printf("dddd\n"); } } #if DEBUG top = ereach_sn(supNo,c,r,curCol,nxtCol,col2sup, eTree,xi,xi+supNo); if(supNo-top != prunePtr[s]-prunePtr[s-1]) printf("sss"); #endif double *src, *cur = &lValues[lC[curCol]];//pointing to first element of the current supernode //#ifndef PRUNE top = ereach_sn(supNo, cT, rT, curCol, nxtCol, col2Sup, aTree, xi, xi + supNo); assert(top >= 0); //if(s==2){top =2; xi[top] = 0;} for (int i = top; i < supNo; ++i) { int lSN = xi[i]; /*#else for (int i = prunePtr[s - 1]; i < prunePtr[s]; ++i) { int lSN = pruneSet[i]; #endif*/ int nSupRs = 0; #if DEBUG if(xi[top++] != lSN) printf("fail"); #endif int cSN = blockSet[lSN];//first col of current SN int cNSN = blockSet[lSN + 1];//first col of Next SN int Li_ptr_cNSN = Li_ptr[cNSN]; int Li_ptr_cSN = Li_ptr[cSN]; int nSNRCur = Li_ptr_cNSN - Li_ptr_cSN; int supWdts = cNSN - cSN;//The width of current src SN int lb = 0, ub = 0; bool sw = true; int beg_col = cSN, end_col = 0; for (int j = Li_ptr_cSN; j < Li_ptr_cNSN; ++j) { //finding the overlap between curCol and curCol+supWdt in the src col if (lR[j] >= curCol && sw) { 
//src*transpose(row lR[j]) lb = j - Li_ptr_cSN; sw = false; } if (lR[j] < curCol + supWdt && !sw) { ub = j - Li_ptr_cSN; } } nSupRs = Li_ptr_cNSN - Li_ptr_cSN - lb; int ndrow1 = ub - lb + 1; int ndrow3 = nSupRs - ndrow1; src = &lValues[lC[cSN] + lb];//first element of src supernode starting from row lb double *srcL = &lValues[lC[cSN] + ub + 1]; // multiplying L * D for (int l = 0; l < supWdts; ++l) { double tmp = D[cSN + l]; for (int l1 = 0; l1 < nSupRs; ++l1) { trn_diag[l * nSupRs + l1] = tmp * src[l * nSNRCur + l1]; } } /*dgemm("N", "C", &ndrow3, &ndrow1, &supWdts, one, srcL, &nSNRCur, src, &nSNRCur, zero, &contribs[ndrow1], &nSupRs);*/ dgemm("N", "C", &nSupRs, &ndrow1, &supWdts, one, trn_diag, &nSupRs, src, &nSNRCur, zero, contribs, &nSupRs); //copying contrib to L for (int i = 0; i < ndrow1; ++i) {//Copy contribs to L int col = map[lR[Li_ptr_cSN + i + lb]];//col in the SN //double ddiag = 1.0 ;/// D[col]; for (int j = i; j < nSupRs; ++j) { int cRow = lR[Li_ptr_cSN + j + lb];//corresponding row in SN //lValues[lC[curCol+col]+ map[cRow]] -= contribs[i*nSupRs+j]; cur[col * nSupR + map[cRow]] -= contribs[i * nSupRs + j]; } } } #ifdef MKL //dpotrf("L",&supWdt,cur,&nSupR,&info); sym_sytrf(cur, supWdt, nSupR, &nbpivot, threshold); //LAPACKE_dsytrf(LAPACK_COL_MAJOR,'L',supWdt,cur,nSupR,ipiv); #endif // Making L*D int rowNo = nSupR - supWdt; for (int l = 0; l < supWdt; ++l) { double tmp = cur[l + l * nSupR]; D[curCol + l] = tmp; double *stCol = trn_diag + l * supWdt + l; double *curCol = cur + l * nSupR + l; *stCol = tmp; for (int l1 = 0; l1 < supWdt - l - 1; ++l1) { *(++stCol) = tmp * *(++curCol); } } dtrsm("R", "L", "C", "N", &rowNo, &supWdt, one, trn_diag, &supWdt, &cur[supWdt], &nSupR); for (int k = 0; k < supWdt; ++k) { cur[k * nSupR + k] = 1.0; } //copying 1/Di into D /*for (int l = 0; l < supWdt; ++l) { D[curCol+l] = one[0] / cur[l + l*nSupR]; }*/ /* for (int k = 0; k < nSupR * supWdt; ++k) { std::cout<<cur[k]<<","; } std::cout<<"==== \n";*/ } delete[]contribs; 
delete[]trn_diag; delete[]xi; delete[]map; } #ifdef TIMING1 endin = std::chrono::system_clock::now(); elapsed_seconds = endin-startin; duration1=elapsed_seconds.count(); int thth2=omp_get_thread_num(); std::cout<<"**"<<thth2<<" : "<<j1<<" "<<duration1<<"\n"; #endif } } #if 1 //LAst iteration MKL_Domain_Set_Num_Threads(threads, MKL_DOMAIN_BLAS); map = new int[n](); contribs = new double[super_max * col_max](); xi = new int[2 * supNo](); trn_diag = new double[super_max * col_max](); for (int j1 = levelPtr[nLevels - 1]; j1 < levelPtr[nLevels]; ++j1) { #ifdef TLAST start = std::chrono::system_clock::now(); #endif for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) { int s = partition[k1] + 1; int curCol = s != 0 ? blockSet[s - 1] : 0; int nxtCol = blockSet[s]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode for (int i = Li_ptr[curCol], cnt = 0; i < Li_ptr[nxtCol]; ++i) { map[lR[i]] = cnt++;//mapping L rows position to actual row idx } //copy the columns from A to L for (int i = curCol; i < nxtCol; ++i) {//Copy A to L int pad = i - curCol; for (int j = c[i]; j < c[i + 1]; ++j) { // if(r[j]>=i)//does not need to save upper part. 
lValues[lC[i] + map[r[j]]] = values[j]; // else // printf("dddd\n"); } } #if DEBUG top = ereach_sn(supNo,c,r,curCol,nxtCol,col2sup, eTree,xi,xi+supNo); if(supNo-top != prunePtr[s]-prunePtr[s-1]) printf("sss"); #endif double *src, *cur = &lValues[lC[curCol]];//pointing to first element of the current supernode //#ifndef PRUNE top = ereach_sn(supNo, cT, rT, curCol, nxtCol, col2Sup, aTree, xi, xi + supNo); assert(top >= 0); //if(s==2){top =2; xi[top] = 0;} for (int i = top; i < supNo; ++i) { int lSN = xi[i]; /*#else for (int i = prunePtr[s - 1]; i < prunePtr[s]; ++i) { int lSN = pruneSet[i]; #endif*/ int nSupRs = 0; #if DEBUG if(xi[top++] != lSN) printf("fail"); #endif int cSN = blockSet[lSN];//first col of current SN int cNSN = blockSet[lSN + 1];//first col of Next SN int Li_ptr_cNSN = Li_ptr[cNSN]; int Li_ptr_cSN = Li_ptr[cSN]; int nSNRCur = Li_ptr_cNSN - Li_ptr_cSN; int supWdts = cNSN - cSN;//The width of current src SN int lb = 0, ub = 0; bool sw = true; int beg_col = cSN, end_col = 0; for (int j = Li_ptr_cSN; j < Li_ptr_cNSN; ++j) { //finding the overlap between curCol and curCol+supWdt in the src col if (lR[j] >= curCol && sw) { //src*transpose(row lR[j]) lb = j - Li_ptr_cSN; sw = false; } if (lR[j] < curCol + supWdt && !sw) { ub = j - Li_ptr_cSN; } } nSupRs = Li_ptr_cNSN - Li_ptr_cSN - lb; int ndrow1 = ub - lb + 1; int ndrow3 = nSupRs - ndrow1; src = &lValues[lC[cSN] + lb];//first element of src supernode starting from row lb double *srcL = &lValues[lC[cSN] + ub + 1]; // multiplying L * D for (int l = 0; l < supWdts; ++l) { double tmp = D[cSN + l]; /* double *dst_tmp = &rn_diag[l * nSupRs]; double *src_tmp = &src[l * nSNRCur]; dscal(nSupRs, )*/ for (int l1 = 0; l1 < nSupRs; ++l1) { trn_diag[l * nSupRs + l1] = tmp * src[l * nSNRCur + l1]; } } /*dgemm("N", "C", &ndrow3, &ndrow1, &supWdts, one, srcL, &nSNRCur, src, &nSNRCur, zero, &contribs[ndrow1], &nSupRs);*/ dgemm("N", "C", &nSupRs, &ndrow1, &supWdts, one, trn_diag, &nSupRs, src, &nSNRCur, zero, contribs, 
&nSupRs); //copying contrib to L for (int i = 0; i < ndrow1; ++i) {//Copy contribs to L int col = map[lR[Li_ptr_cSN + i + lb]];//col in the SN //double ddiag = 1.0 ;/// D[col]; for (int j = i; j < nSupRs; ++j) { int cRow = lR[Li_ptr_cSN + j + lb];//corresponding row in SN //lValues[lC[curCol+col]+ map[cRow]] -= contribs[i*nSupRs+j]; cur[col * nSupR + map[cRow]] -= contribs[i * nSupRs + j]; } } } #ifdef MKL //dpotrf("L",&supWdt,cur,&nSupR,&info); /* for (int m = 0; m < supWdt; ++m) { for (int i = m; i < supWdt; ++i) { contribs[m*su] } } dspff*/ start = std::chrono::system_clock::now(); sym_sytrf(cur, supWdt, nSupR, &nbpivot, threshold); //LAPACKE_dsytrf(LAPACK_COL_MAJOR,'L',supWdt,cur,nSupR,ipiv); //dsytrf("L",) #endif // Making L*D int rowNo = nSupR - supWdt; for (int l = 0; l < supWdt; ++l) { double tmp = cur[l + l * nSupR]; D[curCol + l] = tmp; double *stCol = trn_diag + l * supWdt + l; double *curCol = cur + l * nSupR + l; *stCol = tmp; for (int l1 = 0; l1 < supWdt - l - 1; ++l1) { *(++stCol) = tmp * *(++curCol); } } dtrsm("R", "L", "C", "N", &rowNo, &supWdt, one, trn_diag, &supWdt, &cur[supWdt], &nSupR); for (int k = 0; k < supWdt; ++k) { cur[k * nSupR + k] = 1.0; } //copying 1/Di into D /*for (int l = 0; l < supWdt; ++l) { D[curCol+l] = one[0] / cur[l + l*nSupR]; }*/ /* for (int k = 0; k < nSupR * supWdt; ++k) { std::cout<<cur[k]<<","; } std::cout<<"==== \n";*/ } #ifdef TLAST end = std::chrono::system_clock::now(); elapsed_seconds = end-start; duration1=elapsed_seconds.count(); std::cout<<"++ " <<duration1<<"\n"; #endif } #endif delete[]contribs; delete[]trn_diag; delete[]xi; delete[]map; return true; } #undef TIMING1 #undef BLASTIMING } #endif //PROJECT_PARALLEL_BLOCKED_LDLT_H
sssp.h
/* * sssp.h * LLAMA Graph Analytics * * Copyright 2014 * The President and Fellows of Harvard College. * * Copyright 2014 * Oracle Labs. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/
#ifndef LL_GENERATED_CPP_SSSP_H
#define LL_GENERATED_CPP_SSSP_H

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <limits.h>
#include <cmath>
#include <algorithm>
#include <omp.h>

#include "llama/ll_bfs_template.h"
#include "llama/ll_writable_graph.h"
#include "benchmarks/benchmark.h"

// When defined, finalize() returns the maximum finite distance found;
// otherwise it returns the number of reached nodes.
#define LL_SSSP_RETURNS_MAX


/**
 * Weighted SSSP
 *
 * Bellman-Ford-style iterative relaxation over out-edges, parallelized with
 * OpenMP. Distances are double-buffered (G_dist / G_dist_nxt) and per-node
 * spinlocks guard the relaxation updates.
 */
template <class Graph, class WeightType>
class ll_b_sssp_weighted : public ll_benchmark<Graph> {

	node_t root;                                  // source node
	WeightType* G_dist;                           // per-node distance (auto-managed array)
	ll_mlcsr_edge_property<WeightType>* G_weight; // edge weights (auto-managed property)


public:

	/**
	 * Create the benchmark
	 *
	 * @param root the root
	 * @param weightName the weight property name
	 */
	ll_b_sssp_weighted(node_t root, const char* weightName)
		: ll_benchmark<Graph>("SSSP - Weighted") {
		this->root = root;
		this->create_auto_array_for_nodes(G_dist);
		this->create_auto_property(G_weight, weightName);
	}


	/**
	 * Destroy the benchmark
	 */
	virtual ~ll_b_sssp_weighted(void) {
	}


	/**
	 * Run the benchmark
	 *
	 * @return the numerical result, if applicable
	 */
	virtual double run(void) {

		assert(sizeof(WeightType) >= 4);

		Graph& G = *this->_graph;
		ll_mlcsr_edge_property<WeightType>& G_len = *G_weight;

		ll_spinlock_table lt;
		ll_memory_helper m;

		bool fin = false ;

		bool* G_updated = m.allocate<bool>(G.max_nodes());
		bool* G_updated_nxt = m.allocate<bool>(G.max_nodes());
		WeightType* G_dist_nxt = (WeightType*) malloc(sizeof(WeightType) * G.max_nodes());

		fin = false ;

		// Initialize: root at distance 0, everything else at "infinity"
		#pragma omp parallel for
		for (node_t t0 = 0; t0 < G.max_nodes(); t0 ++) {
			// Assume that INT_MAX-1 is high enough, even if WeightType != int,
			// and also for now assume that WeightType is at least 4 bytes long
			G.set_node_prop(G_dist, t0, (WeightType) ((t0 == root)?0:INT_MAX-1));
			G.set_node_prop(G_updated, t0, (t0 == root)?true:false);
			G.set_node_prop(G_dist_nxt, t0, G_dist[t0]);
			G.set_node_prop(G_updated_nxt, t0, G_updated[t0]);
		}

		// Iterate until no node's distance improves in a full pass
		while ( !fin) {

			bool __E8 = false ;  // "any node updated this round" reduction flag

			fin = true ;
			__E8 = false ;

			// Relax out-edges of every node updated in the previous round
			#pragma omp parallel for schedule(dynamic,4096)
			for (node_t n = 0; n < G.max_nodes(); n ++) {
				if (G_updated[n]) {
					ll_edge_iterator iter;
					G.out_iter_begin(iter, n);
					for (edge_t s_idx = G.out_iter_next(iter);
							s_idx != LL_NIL_EDGE;
							s_idx = G.out_iter_next(iter)) {
						node_t s = LL_ITER_OUT_NEXT_NODE(G, iter, s_idx);
						edge_t e;
						e = s_idx ;
						{ // argmin(argmax) - test and test-and-set
							WeightType G_dist_nxt_new = G_dist[n] + G_len[e];
							if (G_dist_nxt[s]>G_dist_nxt_new) {
								bool G_updated_nxt_arg = true;
								lt.acquire_for(s);
								// Re-check under the lock: another thread may have
								// written an even smaller distance meanwhile
								if (G_dist_nxt[s]>G_dist_nxt_new) {
									G.set_node_prop(G_dist_nxt, s, G_dist_nxt_new);
									G.set_node_prop(G_updated_nxt, s, G_updated_nxt_arg);
								}
								lt.release_for(s);
							}
						}
					}
				}
			}

			// Swap buffers into place and OR-reduce the "updated" flags
			#pragma omp parallel
			{
				bool __E8_prv = false ;

				__E8_prv = false ;

				#pragma omp for nowait
				for (node_t t4 = 0; t4 < G.max_nodes(); t4 ++) {
					G.set_node_prop(G_dist, t4, G_dist_nxt[t4]);
					G.set_node_prop(G_updated, t4, G_updated_nxt[t4]);
					G.set_node_prop(G_updated_nxt, t4, false);
					__E8_prv = __E8_prv || G_updated[t4] ;
				}
				ATOMIC_OR(&__E8, __E8_prv);
			}
			fin = !__E8 ;
		}

		free(G_dist_nxt);
		return 0;
	}


	/**
	 * Finalize the benchmark
	 *
	 * @return the updated numerical result, if applicable
	 */
	virtual double finalize(void) {

		size_t count = 0;
		int32_t max = 0;

		for (node_t n = 0; n < this->_graph->max_nodes(); n++) {
			if (G_dist[n] < INT_MAX-1) {
				count++;
				if (G_dist[n] > max) max = G_dist[n];
			}
		}

#ifdef LL_SSSP_RETURNS_MAX
		return max;
#else
		return count;
#endif
	}


	/**
	 * Print the results
	 *
	 * @param f the output file
	 */
	virtual void print_results(FILE* f) {
		print_results_part(f, this->_graph, G_dist);
	}
};


// BFS/DFS definitions for the procedure
template <class Graph>
class u_sssp_bfs : public ll_bfs_template
    <Graph, short, true, false, false, false>
{
public:
	u_sssp_bfs(Graph& _G, node_t& _root, int32_t* _dist)
		: ll_bfs_template<Graph, short, true, false, false, false>(_G),
		G(_G), root(_root), dist(_dist){}

private:  // list of variables
	Graph& G;
	node_t& root;
	int32_t* dist;

protected:
	// Record the BFS level as the (unweighted) distance of each visited node
	virtual void visit_fw(node_t v) {
		dist[v] = this->get_curr_level();
	}

	virtual void visit_rv(node_t v) {}
	virtual bool check_navigator(node_t v, edge_t v_idx) {return true;}
};


/**
 * Unweighted SSSP
 *
 * Uses a BFS traversal; the distance of a node is its BFS level.
 */
template <class Graph>
class ll_b_sssp_unweighted_bfs : public ll_benchmark<Graph> {

	node_t root;
	int32_t* G_dist;


public:

	/**
	 * Create the benchmark
	 *
	 * @param root the root
	 */
	ll_b_sssp_unweighted_bfs(node_t root)
		: ll_benchmark<Graph>("SSSP - Unweighted, BFS") {
		this->root = root;
		this->create_auto_array_for_nodes(G_dist);
	}


	/**
	 * Destroy the benchmark
	 */
	virtual ~ll_b_sssp_unweighted_bfs(void) {
	}


	/**
	 * Run the benchmark
	 *
	 * @return the numerical result, if applicable
	 */
	virtual double run(void) {

		Graph& G = *this->_graph;

		#pragma omp parallel for
		for (node_t t0 = 0; t0 < G.max_nodes(); t0++) {
			G.set_node_prop(G_dist, t0, INT_MAX-1);
		}
		G.set_node_prop(G_dist, root, 0);

		u_sssp_bfs<Graph> _BFS(G, root, G_dist);
		_BFS.prepare(root);
		_BFS.do_bfs_forward();

		return 0;
	}


	/**
	 * Finalize the benchmark
	 *
	 * @return the updated numerical result, if applicable
	 */
	virtual double finalize(void) {

		size_t count = 0;
		int32_t max = 0;

		for (node_t n = 0; n < this->_graph->max_nodes(); n++) {
			if (G_dist[n] < INT_MAX-1) {
				count++;
				if (G_dist[n] > max) max = G_dist[n];
			}
		}

#ifdef LL_SSSP_RETURNS_MAX
		return max;
#else
		return count;
#endif
	}


	/**
	 * Print the results
	 *
	 * @param f the output file
	 */
	virtual void print_results(FILE* f) {
		print_results_part(f, this->_graph, G_dist);
	}
};


/**
 * Unweighted SSSP
 *
 * Same iterative relaxation scheme as the weighted variant, with every edge
 * treated as having weight 1.
 */
template <class Graph>
class ll_b_sssp_unweighted_iter : public ll_benchmark<Graph> {

	node_t root;
	int32_t* G_dist;


public:

	/**
	 * Create the benchmark
	 *
	 * @param root the root
	 */
	ll_b_sssp_unweighted_iter(node_t root)
		: ll_benchmark<Graph>("SSSP - Unweighted, iterative") {
		this->root = root;
		this->create_auto_array_for_nodes(G_dist);
	}


	/**
	 * Destroy the benchmark
	 */
	virtual ~ll_b_sssp_unweighted_iter(void) {
	}


	/**
	 * Run the benchmark
	 *
	 * @return the numerical result, if applicable
	 */
	virtual double run(void) {

		Graph& G = *this->_graph;

		ll_spinlock_table lt;
		ll_memory_helper m;

		bool fin = false ;

		bool* G_updated = m.allocate<bool>(G.max_nodes());
		bool* G_updated_nxt = m.allocate<bool>(G.max_nodes());
		int32_t* G_dist_nxt = (int32_t*) malloc(sizeof(int32_t) * G.max_nodes());

		fin = false ;

		// Initialize: root at distance 0, everything else at "infinity"
		#pragma omp parallel for
		for (node_t t0 = 0; t0 < G.max_nodes(); t0 ++) {
			G.set_node_prop(G_dist, t0, (t0 == root)?0:INT_MAX-1);
			G.set_node_prop(G_updated, t0, (t0 == root)?true:false);
			G.set_node_prop(G_dist_nxt, t0, G_dist[t0]);
			G.set_node_prop(G_updated_nxt, t0, G_updated[t0]);
		}

		while ( !fin) {

			bool __E8 = false ;

			fin = true ;
			__E8 = false ;

			#pragma omp parallel for schedule(dynamic,4096)
			for (node_t n = 0; n < G.max_nodes(); n ++) {
				if (G_updated[n]) {
					ll_edge_iterator iter;
					G.out_iter_begin(iter, n);
					for (edge_t s_idx = G.out_iter_next(iter);
							s_idx != LL_NIL_EDGE;
							s_idx = G.out_iter_next(iter)) {
						node_t s = LL_ITER_OUT_NEXT_NODE(G, iter, s_idx);
						{ // argmin(argmax) - test and test-and-set
							int32_t G_dist_nxt_new = G_dist[n] + 1;
							if (G_dist_nxt[s]>G_dist_nxt_new) {
								bool G_updated_nxt_arg = true;
								lt.acquire_for(s);
								// Re-check under the lock (double-checked update)
								if (G_dist_nxt[s]>G_dist_nxt_new) {
									G.set_node_prop(G_dist_nxt, s, G_dist_nxt_new);
									G.set_node_prop(G_updated_nxt, s, G_updated_nxt_arg);
								}
								lt.release_for(s);
							}
						}
					}
				}
			}

			#pragma omp parallel
			{
				bool __E8_prv = false ;

				#pragma omp for nowait
				for (node_t t4 = 0; t4 < G.max_nodes(); t4 ++) {
					G.set_node_prop(G_dist, t4, G_dist_nxt[t4]);
					G.set_node_prop(G_updated, t4, G_updated_nxt[t4]);
					G.set_node_prop(G_updated_nxt, t4, false);
					__E8_prv = __E8_prv || G_updated[t4] ;
				}
				ATOMIC_OR(&__E8, __E8_prv);
			}
			fin = !__E8 ;
		}

		free(G_dist_nxt);
		return 0;
	}


	/**
	 * Finalize the benchmark
	 *
	 * @return the updated numerical result, if applicable
	 */
	virtual double finalize(void) {

		size_t count = 0;
		int32_t max = 0;

		for (node_t n = 0; n < this->_graph->max_nodes(); n++) {
			if (G_dist[n] < INT_MAX-1) {
				count++;
				if (G_dist[n] > max) max = G_dist[n];
			}
		}

#ifdef LL_SSSP_RETURNS_MAX
		return max;
#else
		return count;
#endif
	}


	/**
	 * Print the results
	 *
	 * @param f the output file
	 */
	virtual void print_results(FILE* f) {
		print_results_part(f, this->_graph, G_dist);
	}
};

#endif
nvptx_target_printf_codegen.c
// Test target codegen - host bc file has to be created first. // RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 // RUN: %clang_cc1 -verify -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc // RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 // expected-no-diagnostics extern int printf(const char *, ...); // Check a simple call to printf end-to-end. // CHECK: [[SIMPLE_PRINTF_TY:%[a-zA-Z0-9_]+]] = type { i32, i64, double } int CheckSimple() { // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+CheckSimple.+]]_worker() #pragma omp target { // Entry point. // CHECK: define {{.*}}void [[T1]]() // Alloca in entry block. 
// CHECK: [[BUF:%[a-zA-Z0-9_]+]] = alloca [[SIMPLE_PRINTF_TY]] // CHECK: {{call|invoke}} void [[T1]]_worker() // CHECK: br label {{%?}}[[EXIT:.+]] // // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] // // CHECK: [[MASTER]] // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] // printf in master-only basic block. // CHECK: [[FMT:%[0-9]+]] = load{{.*}}%fmt const char* fmt = "%d %lld %f"; // CHECK: [[PTR0:%[0-9]+]] = getelementptr inbounds [[SIMPLE_PRINTF_TY]], [[SIMPLE_PRINTF_TY]]* [[BUF]], i32 0, i32 0 // CHECK: store i32 1, i32* [[PTR0]], align 4 // CHECK: [[PTR1:%[0-9]+]] = getelementptr inbounds [[SIMPLE_PRINTF_TY]], [[SIMPLE_PRINTF_TY]]* [[BUF]], i32 0, i32 1 // CHECK: store i64 2, i64* [[PTR1]], align 8 // CHECK: [[PTR2:%[0-9]+]] = getelementptr inbounds [[SIMPLE_PRINTF_TY]], [[SIMPLE_PRINTF_TY]]* [[BUF]], i32 0, i32 2 // CHECK: store double 3.0{{[^,]*}}, double* [[PTR2]], align 8 // CHECK: [[BUF_CAST:%[0-9]+]] = bitcast [[SIMPLE_PRINTF_TY]]* [[BUF]] to i8* // CHECK: [[RET:%[0-9]+]] = call i32 @vprintf(i8* [[FMT]], i8* [[BUF_CAST]]) printf(fmt, 1, 2ll, 3.0); } return 0; } void CheckNoArgs() { // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+CheckNoArgs.+]]_worker() #pragma omp target { // Entry point. 
// CHECK: define {{.*}}void [[T2]]() // CHECK: {{call|invoke}} void [[T2]]_worker() // CHECK: br label {{%?}}[[EXIT:.+]] // // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] // // CHECK: [[MASTER]] // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] // printf in master-only basic block. // CHECK: call i32 @vprintf({{.*}}, i8* null){{$}} printf("hello, world!"); } } // Check that printf's alloca happens in the entry block, not inside the if // statement. int foo; void CheckAllocaIsInEntryBlock() { // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+CheckAllocaIsInEntryBlock.+]]_worker() #pragma omp target { // Entry point. // CHECK: define {{.*}}void [[T3]]( // Alloca in entry block. // CHECK: alloca %printf_args // CHECK: {{call|invoke}} void [[T3]]_worker() // CHECK: br label {{%?}}[[EXIT:.+]] // // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] // // CHECK: [[MASTER]] // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]] // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] if (foo) { printf("%d", 42); } } }
hist_util.h
/*! * Copyright 2017 by Contributors * \file hist_util.h * \brief Utility for fast histogram aggregation * \author Philip Cho, Tianqi Chen */ #ifndef XGBOOST_COMMON_HIST_UTIL_H_ #define XGBOOST_COMMON_HIST_UTIL_H_ #include <xgboost/data.h> #include <xgboost/generic_parameters.h> #include <limits> #include <vector> #include <algorithm> #include <memory> #include <utility> #include "row_set.h" #include "../tree/param.h" #include "./quantile.h" #include "./timer.h" #include "random.h" namespace xgboost { /*! * \brief A C-style array with in-stack allocation. As long as the array is smaller than * MaxStackSize, it will be allocated inside the stack. Otherwise, it will be * heap-allocated. */ template<typename T, size_t MaxStackSize> class MemStackAllocator { public: explicit MemStackAllocator(size_t required_size): required_size_(required_size) { } T* Get() { if (!ptr_) { if (MaxStackSize >= required_size_) { ptr_ = stack_mem_; } else { ptr_ = reinterpret_cast<T*>(malloc(required_size_ * sizeof(T))); do_free_ = true; } } return ptr_; } ~MemStackAllocator() { if (do_free_) free(ptr_); } private: T* ptr_ = nullptr; bool do_free_ = false; size_t required_size_; T stack_mem_[MaxStackSize]; }; namespace common { /* * \brief A thin wrapper around dynamically allocated C-style array. * Make sure to call resize() before use. */ template<typename T> struct SimpleArray { ~SimpleArray() { free(ptr_); ptr_ = nullptr; } void resize(size_t n) { T* ptr = static_cast<T*>(malloc(n*sizeof(T))); memcpy(ptr, ptr_, n_ * sizeof(T)); free(ptr_); ptr_ = ptr; n_ = n; } T& operator[](size_t idx) { return ptr_[idx]; } T& operator[](size_t idx) const { return ptr_[idx]; } size_t size() const { return n_; } T back() const { return ptr_[n_-1]; } T* data() { return ptr_; } const T* data() const { return ptr_; } T* begin() { return ptr_; } const T* begin() const { return ptr_; } T* end() { return ptr_ + n_; } const T* end() const { return ptr_ + n_; } private: T* ptr_ = nullptr; size_t n_ = 0; }; /*! 
* \brief A single row in global histogram index. * Directly represent the global index in the histogram entry. */ using GHistIndexRow = Span<uint32_t const>; // A CSC matrix representing histogram cuts, used in CPU quantile hist. class HistogramCuts { // Using friends to avoid creating a virtual class, since HistogramCuts is used as value // object in many places. friend class SparseCuts; friend class DenseCuts; friend class CutsBuilder; protected: using BinIdx = uint32_t; common::Monitor monitor_; std::vector<bst_float> cut_values_; std::vector<uint32_t> cut_ptrs_; std::vector<float> min_vals_; // storing minimum value in a sketch set. public: HistogramCuts(); HistogramCuts(HistogramCuts const& that) = delete; HistogramCuts(HistogramCuts&& that) noexcept(true) { *this = std::forward<HistogramCuts&&>(that); } HistogramCuts& operator=(HistogramCuts const& that) = delete; HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) { monitor_ = std::move(that.monitor_); cut_ptrs_ = std::move(that.cut_ptrs_); cut_values_ = std::move(that.cut_values_); min_vals_ = std::move(that.min_vals_); return *this; } /* \brief Build histogram cuts. */ void Build(DMatrix* dmat, uint32_t const max_num_bins); /* \brief How many bins a feature has. */ uint32_t FeatureBins(uint32_t feature) const { return cut_ptrs_.at(feature+1) - cut_ptrs_[feature]; } // Getters. Cuts should be of no use after building histogram indices, but currently // it's deeply linked with quantile_hist, gpu sketcher and gpu_hist. So we preserve // these for now. 
std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_; } std::vector<float> const& Values() const { return cut_values_; } std::vector<float> const& MinValues() const { return min_vals_; } size_t TotalBins() const { return cut_ptrs_.back(); } BinIdx SearchBin(float value, uint32_t column_id) { auto beg = cut_ptrs_.at(column_id); auto end = cut_ptrs_.at(column_id + 1); auto it = std::upper_bound(cut_values_.cbegin() + beg, cut_values_.cbegin() + end, value); if (it == cut_values_.cend()) { it = cut_values_.cend() - 1; } BinIdx idx = it - cut_values_.cbegin(); return idx; } BinIdx SearchBin(Entry const& e) { return SearchBin(e.fvalue, e.index); } }; /* \brief An interface for building quantile cuts. * * `DenseCuts' always assumes there are `max_bins` for each feature, which makes it not * suitable for sparse dataset. On the other hand `SparseCuts' uses `GetColumnBatches', * which doubles the memory usage, hence can not be applied to dense dataset. */ class CutsBuilder { public: using WXQSketch = common::WXQuantileSketch<bst_float, bst_float>; protected: HistogramCuts* p_cuts_; /* \brief return whether group for ranking is used. */ static bool UseGroup(DMatrix* dmat); public: explicit CutsBuilder(HistogramCuts* p_cuts) : p_cuts_{p_cuts} {} virtual ~CutsBuilder() = default; static uint32_t SearchGroupIndFromRow( std::vector<bst_uint> const& group_ptr, size_t const base_rowid) { using KIt = std::vector<bst_uint>::const_iterator; KIt res = std::lower_bound(group_ptr.cbegin(), group_ptr.cend() - 1, base_rowid); // Cannot use CHECK_NE because it will try to print the iterator. 
bool const found = res != group_ptr.cend() - 1; if (!found) { LOG(FATAL) << "Row " << base_rowid << " does not lie in any group!"; } uint32_t group_ind = std::distance(group_ptr.cbegin(), res); return group_ind; } void AddCutPoint(WXQSketch::SummaryContainer const& summary) { if (summary.size > 1 && summary.size <= 16) { /* specialized code categorial / ordinal data -- use midpoints */ for (size_t i = 1; i < summary.size; ++i) { bst_float cpt = (summary.data[i].value + summary.data[i - 1].value) / 2.0f; if (i == 1 || cpt > p_cuts_->cut_values_.back()) { p_cuts_->cut_values_.push_back(cpt); } } } else { for (size_t i = 2; i < summary.size; ++i) { bst_float cpt = summary.data[i - 1].value; if (i == 2 || cpt > p_cuts_->cut_values_.back()) { p_cuts_->cut_values_.push_back(cpt); } } } } /* \brief Build histogram indices. */ virtual void Build(DMatrix* dmat, uint32_t const max_num_bins) = 0; }; /*! \brief Cut configuration for sparse dataset. */ class SparseCuts : public CutsBuilder { /* \brief Distrbute columns to each thread according to number of entries. */ static std::vector<size_t> LoadBalance(SparsePage const& page, size_t const nthreads); Monitor monitor_; public: explicit SparseCuts(HistogramCuts* container) : CutsBuilder(container) { monitor_.Init(__FUNCTION__); } /* \brief Concatonate the built cuts in each thread. */ void Concat(std::vector<std::unique_ptr<SparseCuts>> const& cuts, uint32_t n_cols); /* \brief Build histogram indices in single thread. */ void SingleThreadBuild(SparsePage const& page, MetaInfo const& info, uint32_t max_num_bins, bool const use_group_ind, uint32_t beg, uint32_t end, uint32_t thread_id); void Build(DMatrix* dmat, uint32_t const max_num_bins) override; }; /*! \brief Cut configuration for dense dataset. 
*/ class DenseCuts : public CutsBuilder { protected: Monitor monitor_; public: explicit DenseCuts(HistogramCuts* container) : CutsBuilder(container) { monitor_.Init(__FUNCTION__); } void Init(std::vector<WXQSketch>* sketchs, uint32_t max_num_bins); void Build(DMatrix* p_fmat, uint32_t max_num_bins) override; }; // FIXME(trivialfis): Merge this into generic cut builder. /*! \brief Builds the cut matrix on the GPU. * * \return The row stride across the entire dataset. */ size_t DeviceSketch(int device, int max_bin, int gpu_batch_nrows, DMatrix* dmat, HistogramCuts* hmat); /*! * \brief preprocessed global index matrix, in CSR format * Transform floating values to integer index in histogram * This is a global histogram index. */ struct GHistIndexMatrix { /*! \brief row pointer to rows by element position */ // std::vector<size_t> row_ptr; SimpleArray<size_t> row_ptr; /*! \brief The index data */ SimpleArray<uint32_t> index; /*! \brief hit count of each index */ std::vector<size_t> hit_count; /*! 
\brief The corresponding cuts */ HistogramCuts cut; // Create a global histogram matrix, given cut void Init(DMatrix* p_fmat, int max_num_bins); // get i-th row inline GHistIndexRow operator[](size_t i) const { return {&index[0] + row_ptr[i], static_cast<GHistIndexRow::index_type>( row_ptr[i + 1] - row_ptr[i])}; } inline void GetFeatureCounts(size_t* counts) const { auto nfeature = cut.Ptrs().size() - 1; for (unsigned fid = 0; fid < nfeature; ++fid) { auto ibegin = cut.Ptrs()[fid]; auto iend = cut.Ptrs()[fid + 1]; for (auto i = ibegin; i < iend; ++i) { counts[fid] += hit_count[i]; } } } private: std::vector<size_t> hit_count_tloc_; }; struct GHistIndexBlock { const size_t* row_ptr; const uint32_t* index; inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index) : row_ptr(row_ptr), index(index) {} // get i-th row inline GHistIndexRow operator[](size_t i) const { return {&index[0] + row_ptr[i], row_ptr[i + 1] - row_ptr[i]}; } }; class ColumnMatrix; class GHistIndexBlockMatrix { public: void Init(const GHistIndexMatrix& gmat, const ColumnMatrix& colmat, const tree::TrainParam& param); inline GHistIndexBlock operator[](size_t i) const { return {blocks_[i].row_ptr_begin, blocks_[i].index_begin}; } inline size_t GetNumBlock() const { return blocks_.size(); } private: std::vector<size_t> row_ptr_; std::vector<uint32_t> index_; const HistogramCuts* cut_; struct Block { const size_t* row_ptr_begin; const size_t* row_ptr_end; const uint32_t* index_begin; const uint32_t* index_end; }; std::vector<Block> blocks_; }; /*! * \brief used instead of GradStats to have float instead of double to reduce histograms * this improves performance by 10-30% and memory consumption for histograms by 2x * accuracy in both cases is the same */ struct GradStatHist { typedef float GradType; /*! \brief sum gradient statistics */ GradType sum_grad; /*! 
\brief sum hessian statistics */ GradType sum_hess; GradStatHist() : sum_grad{0}, sum_hess{0} { static_assert(sizeof(GradStatHist) == 8, "Size of GradStatHist is not 8 bytes."); } inline void Add(const GradStatHist& b) { sum_grad += b.sum_grad; sum_hess += b.sum_hess; } inline void Add(const tree::GradStats& b) { sum_grad += b.sum_grad; sum_hess += b.sum_hess; } inline void Add(const GradientPair& p) { this->Add(p.GetGrad(), p.GetHess()); } inline void Add(const GradType& grad, const GradType& hess) { sum_grad += grad; sum_hess += hess; } inline tree::GradStats ToGradStat() const { return tree::GradStats(sum_grad, sum_hess); } inline void SetSubstract(const GradStatHist& a, const GradStatHist& b) { sum_grad = a.sum_grad - b.sum_grad; sum_hess = a.sum_hess - b.sum_hess; } inline void SetSubstract(const tree::GradStats& a, const GradStatHist& b) { sum_grad = a.sum_grad - b.sum_grad; sum_hess = a.sum_hess - b.sum_hess; } inline GradType GetGrad() const { return sum_grad; } inline GradType GetHess() const { return sum_hess; } inline static void Reduce(GradStatHist& a, const GradStatHist& b) { // NOLINT(*) a.Add(b); } }; using GHistRow = Span<GradStatHist>; /*! * \brief histogram of gradient statistics for multiple nodes */ class HistCollection { public: // access histogram for i-th node inline GHistRow operator[](bst_uint nid) { AddHistRow(nid); return { const_cast<GradStatHist*>(dmlc::BeginPtr(data_arr_[nid])), nbins_}; } // have we computed a histogram for i-th node? inline bool RowExists(bst_uint nid) const { return nid < data_arr_.size(); } // initialize histogram collection inline void Init(uint32_t nbins) { if (nbins_ != nbins) { data_arr_.clear(); nbins_ = nbins; } } // create an empty histogram for i-th node inline void AddHistRow(bst_uint nid) { if (data_arr_.size() <= nid) { size_t prev = data_arr_.size(); data_arr_.resize(nid + 1); for (size_t i = prev; i < data_arr_.size(); ++i) { data_arr_[i].resize(nbins_); } } } private: /*! 
\brief number of all bins over all features */ uint32_t nbins_ = 0; std::vector<std::vector<GradStatHist>> data_arr_; }; /*! * \brief builder for histograms of gradient statistics */ class GHistBuilder { public: // initialize builder inline void Init(size_t nthread, uint32_t nbins) { nthread_ = nthread; nbins_ = nbins; } void BuildBlockHist(const std::vector<GradientPair>& gpair, const RowSetCollection::Elem row_indices, const GHistIndexBlockMatrix& gmatb, GHistRow hist) { constexpr int kUnroll = 8; // loop unrolling factor const int32_t nblock = gmatb.GetNumBlock(); const size_t nrows = row_indices.end - row_indices.begin; const size_t rest = nrows % kUnroll; #pragma omp parallel for for (int32_t bid = 0; bid < nblock; ++bid) { auto gmat = gmatb[bid]; for (size_t i = 0; i < nrows - rest; i += kUnroll) { size_t rid[kUnroll]; size_t ibegin[kUnroll]; size_t iend[kUnroll]; GradientPair stat[kUnroll]; for (int k = 0; k < kUnroll; ++k) { rid[k] = row_indices.begin[i + k]; } for (int k = 0; k < kUnroll; ++k) { ibegin[k] = gmat.row_ptr[rid[k]]; iend[k] = gmat.row_ptr[rid[k] + 1]; } for (int k = 0; k < kUnroll; ++k) { stat[k] = gpair[rid[k]]; } for (int k = 0; k < kUnroll; ++k) { for (size_t j = ibegin[k]; j < iend[k]; ++j) { const uint32_t bin = gmat.index[j]; hist[bin].Add(stat[k]); } } } for (size_t i = nrows - rest; i < nrows; ++i) { const size_t rid = row_indices.begin[i]; const size_t ibegin = gmat.row_ptr[rid]; const size_t iend = gmat.row_ptr[rid + 1]; const GradientPair stat = gpair[rid]; for (size_t j = ibegin; j < iend; ++j) { const uint32_t bin = gmat.index[j]; hist[bin].Add(stat); } } } } uint32_t GetNumBins() { return nbins_; } private: /*! \brief number of threads for parallel computation */ size_t nthread_; /*! 
\brief number of all bins over all features */ uint32_t nbins_; }; void BuildHistLocalDense(size_t istart, size_t iend, size_t nrows, const size_t* rid, const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr, GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat); void BuildHistLocalSparse(size_t istart, size_t iend, size_t nrows, const size_t* rid, const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr, GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat); void SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow parent); } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_HIST_UTIL_H_
matrix_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file matrix_op-inl.h * \brief Function definition of matrix related operators */ #ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #include <mxnet/operator_util.h> #include <vector> #include <algorithm> #include <utility> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "../channel_op_common.h" #include "../mxnet_op.h" #include "broadcast_reduce_op.h" #include "./init_op.h" #include "../../common/static_array.h" #include "./slice-inl.h" #if MXNET_USE_CUDA #include <thrust/device_vector.h> #endif #ifdef __CUDACC__ #include "./pseudo2DTranspose_op-inl.cuh" #endif namespace mxnet { namespace op { struct ReshapeParam : public dmlc::Parameter<ReshapeParam> { mxnet::TShape target_shape; bool keep_highest; mxnet::Tuple<int> shape; bool reverse; DMLC_DECLARE_PARAMETER(ReshapeParam) { DMLC_DECLARE_FIELD(shape) .set_default(mxnet::Tuple<int>()) .describe("The target shape"); DMLC_DECLARE_FIELD(reverse) .set_default(false) .describe("If true then the special values are inferred from right to left"); DMLC_DECLARE_FIELD(target_shape) .set_default(mxnet::TShape(0, -1)) 
.describe("(Deprecated! Use ``shape`` instead.) " "Target new shape. One and only one dim can be 0, " "in which case it will be inferred from the rest of dims"); DMLC_DECLARE_FIELD(keep_highest).set_default(false) .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged." "If set to true, then the first dim in target_shape is ignored," "and always fixed as input"); } bool operator==(const ReshapeParam &other) const { return this->target_shape == other.target_shape && this->keep_highest == other.keep_highest && this->shape == other.shape && this->reverse == other.reverse; } }; template<typename IType> inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape, const mxnet::TShape& dshape, bool reverse) { std::vector<IType> dshape_vec; std::vector<IType> param_shape_vec(shape.begin(), shape.end()); for (int i = 0; i < dshape.ndim(); ++i) { dshape_vec.push_back(dshape[i]); } std::vector<IType> tmp; size_t src_idx = 0; int inf_idx = -1; if (reverse) { std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(param_shape_vec.begin(), param_shape_vec.end()); } auto dshape_len = dshape_vec.size(); auto params_len = param_shape_vec.size(); for (size_t i = 0; i < params_len; ++i) { IType proposed_dim = param_shape_vec[i]; if (proposed_dim == 0) { // keep same CHECK_LT(src_idx, dshape_len); tmp.push_back(dshape_vec[src_idx++]); } else if (proposed_dim == -1) { // infer CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred"; inf_idx = i; tmp.push_back(1); src_idx++; } else if (proposed_dim == -2) { // copy all remaining dims from source while (src_idx < dshape_len) { const int dn = dshape_vec[src_idx++]; tmp.push_back(dn); } } else if (proposed_dim == -3) { // merge two dims from source CHECK_LT(src_idx, dshape_len-1); const int d1 = dshape_vec[src_idx++]; const int d2 = dshape_vec[src_idx++]; if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) { tmp.push_back(-1); } else { tmp.push_back(d1 * d2); } } 
else if (proposed_dim == -4) { // split the source dim s into two dims // read the left dim and then the right dim (either can be -1) CHECK_LT(i + 2, params_len); CHECK_LT(src_idx, dshape_len); const int d0 = dshape_vec[src_idx++]; IType d1 = param_shape_vec[++i]; IType d2 = param_shape_vec[++i]; CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1."; if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) << "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0; tmp.push_back(d1); tmp.push_back(d2); } else { // greater than 0, new shape tmp.push_back(proposed_dim); src_idx++; } } if (inf_idx >= 0) { if (shape_is_known(dshape)) { IType new_size = 1; for (IType x : tmp) new_size *= x; tmp[inf_idx] = dshape.Size() / new_size; } else { tmp[inf_idx] = -1; } } if (reverse) { std::reverse(param_shape_vec.begin(), param_shape_vec.end()); std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(tmp.begin(), tmp.end()); } mxnet::TShape oshape(tmp.begin(), tmp.end()); return oshape; } inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) { if (shape_is_known(*in) && shape_is_known(out)) { return true; } else if (!shape_is_known(out)) { return false; } else { int zero_axis = -1; int known_dim_size_prod = 1; for (int i = 0; i < in->ndim(); i++) { if (!mxnet::dim_size_is_known(*in, i)) { if (zero_axis != -1) return false; // more than 1 zero found. 
// Tail of ReverseReshapeInferShape (definition begins before this chunk):
// infers the single unknown (zero) axis of *in from the known output size.
else zero_axis = i;
      } else {
        known_dim_size_prod *= (*in)[i];
      }
    }
    (*in)[zero_axis] = out.Size() / known_dim_size_prod;
    return true;
  }
}

// Shape inference for Reshape: derives the output shape from `shape` (special
// codes handled by InferReshapeShape) or the deprecated `target_shape`, and
// back-propagates through ReverseReshapeInferShape where possible.
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape &dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape oshape;
  if (param_.shape.ndim() != 0) {
    oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
  } else if (param_.target_shape.ndim() != -1) {
    // Legacy path: `target_shape` is deprecated in favor of `shape`.
    LOG(INFO) << "Using target_shape will be deprecated.";
    oshape = param_.target_shape;
    int neg_count = 0;
    index_t inf_idx = 0;
    index_t start_idx = param_.keep_highest ? 1 : 0;
    if (param_.keep_highest) {
      oshape[0] = dshape[0];
    }
    // Count zero-valued axes; a single one is inferred from the total size.
    for (int i = start_idx; i < oshape.ndim(); ++i) {
      if (oshape[i] == 0) {
        neg_count++;
        inf_idx = i;
      }
    }
    if (neg_count == 1) {
      // Set to 1 first so oshape.Size() excludes the inferred axis.
      oshape[inf_idx] = 1;
      oshape[inf_idx] = dshape.Size() / oshape.Size();
    }
  } else {
    // No shape hint at all: try to infer the input from a known output shape.
    return shape_is_known((*out_attrs)[0]) &&
           ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
  }
  ReverseReshapeInferShape(&dshape, oshape);
#if 0
  CHECK_EQ(oshape.Size(), dshape.Size())
      << "Target shape size is different to source. "
      << "Target: " << oshape
      << "\nSource: " << dshape;
#endif
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}

// Shape inference for Flatten: keeps axis 0 and collapses all remaining axes
// into a single dimension.
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  // NOTE(review): accumulating in `int` could overflow for very large tensors;
  // upstream keeps it this way — confirm before changing.
  int target_dim = 1;
  for (int i = 1; i < dshape.ndim(); ++i) {
    target_dim *= dshape[i];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
  return true;
}

struct TransposeParam : public dmlc::Parameter<TransposeParam> {
  mxnet::TShape axes;
  DMLC_DECLARE_PARAMETER(TransposeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
    .describe("Target axis order. By default the axes will be inverted.");
  }
  bool operator==(const TransposeParam &other) const {
    return this->axes == other.axes;
  }
};

/*!
 * \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
 * \param in input tensor
 * \param out output tensor
 * \param row shape of dim 0 of input
 * \param col shape of dim 1 of input
 * \tparam DType Data type
 * \tparam is_addto if true, accumulate into `out` instead of overwriting
 */
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
  // ensure cache line hits and prevent cache miss for any configuration
  // L1 cache size to be utilized = 32kb = 2^15
  // Largest size of a single unit of any dtype <= 8 byte = 2^3
  // Number of elements - (2^15/2^3) = 2^12
  // Block-size - 2^6 v 2^6 (64 v 64)
  // But we could leverage unrolling of for loops (for parallelization)
  // Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled
  // blocksize * blocksize * num_threads = cache_size / dtype_size
  // Instead of explicit unroll, let compiler figure out optimal unroll factor
  const index_t blocksize = 32;
  // collapse 2 parallelizes the outer 2 for loops
// (continuation of TransposeImpl: CUDA pseudo-2D fast path, identity copy, and
// the general N-dimensional transpose dispatch)
  if (isPseudo2DTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
    });
    return;
  }
#endif
  // Special handle the identity case
  if (IsIdentityTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(
          mshadow::Shape1(src.Size()), s);
      Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(
          mshadow::Shape1(ret.Size()), s);
      if (!is_addto) {
        // Use memcpy to accelerate the speed
        Copy(out, in, s);
      } else {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
            s, ret.Size(), out.dptr_, in.dptr_);
      }
    });
    return;
  }
  // Handle the general transpose case
  MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
    switch (axes.ndim()) {
      case 2: {
        Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
        Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
        if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
          // Cache-blocked CPU kernel; the GPU 2D case is handled above.
          Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
        } else {
          LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
                        "in GPU has been covered by transpose_pseudo2D."
                        " Report an issue in Github.";
        }
        break;
      }
      case 3: {
        Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
        Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<3>());
        } else {
          out += transpose(in, axes.get<3>());
        }
        break;
      }
      case 4: {
        Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
        Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<4>());
        } else {
          out += transpose(in, axes.get<4>());
        }
        break;
      }
      case 5: {
        Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
        Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<5>());
        } else {
          out += transpose(in, axes.get<5>());
        }
        break;
      }
      case 6: {
        Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
        Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<6>());
        } else {
          out += transpose(in, axes.get<6>());
        }
        break;
      }
      default:
        LOG(FATAL) << "Transpose support at most 6 dimensions";
        break;
    }
  });
}

// matrix transpose: operator entry point; canonicalizes `axes` (default:
// reversed axis order) and dispatches to TransposeImpl with/without accumulate.
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
  mxnet::TShape axes;
  if (param.axes.ndim() == 0) {
    // Default behavior: reverse the axis order.
    axes = mxnet::TShape(inputs[0].ndim(), -1);
    for (int i = 0; i < axes.ndim(); ++i) {
      axes[i] = axes.ndim() - 1 - i;
    }
  } else {
    axes = common::CanonicalizeAxes(param.axes);
  }
  if (req[0] == kAddTo) {
    TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes);
  } else {
    TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes);
  }
}

// Bidirectional shape inference for transpose: computes the output shape from
// the input (via `axes`) and also back-propagates a known output shape to the
// input. (Body continues in the next chunk.)
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const
TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& shp = (*in_attrs)[0];
  mxnet::TShape& out_shp = (*out_attrs)[0];
  if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp))
    return false;  // none of the shapes is known
  CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
  if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
    CHECK_EQ(out_shp.ndim(), shp.ndim());
  // `ret` is the forward-inferred output shape; `get` is the backward-inferred
  // input shape derived from a (possibly partially) known output shape.
  mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
  mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
  if (param.axes.ndim() == 0) {
    // Default transpose: reversed axis order in both directions.
    for (int i = 0; i < shp.ndim(); ++i) {
      ret[i] = shp[shp.ndim()-1-i];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[shp.ndim()-1-i] = out_shp[i];
    }
  } else {
    CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
    for (int i = 0; i < shp.ndim(); ++i) {
      CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
      ret[i] = shp[param.axes[i]];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[param.axes[i]] = out_shp[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  return shape_is_known(ret);
}

struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
  int axis;
  DMLC_DECLARE_PARAMETER(ExpandDimParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("Position where new axis is to be inserted. Suppose that "
              "the input `NDArray`'s dimension is `ndim`, the range of "
              "the inserted axis is `[-ndim, ndim]`");
  }
  bool operator==(const ExpandDimParam &other) const {
    return this->axis == other.axis;
  }
};

// Bidirectional shape inference for expand_dims: inserts a size-1 axis at
// `param.axis` in the output, and strips it when inferring the input back
// from a known output shape.
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& oshape = (*out_attrs)[0];
  if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) {
    return false;
  }
  int indim = ishape.ndim();
  bool unknown_ishape = false;
  if (-1 == indim) {
    // Input ndim unknown: derive it from the output (which has one extra axis).
    indim = oshape.ndim() - 1;
    unknown_ishape = true;
  }
  int axis = param.axis;
  if (axis < 0) {
    axis += indim + 1;
  }
  CHECK(axis >= 0 && axis <= indim)
      << "axis must be in the range [" << -indim << ", " << indim << "] ("
      << param.axis << " provided)";
  mxnet::TShape ret(indim + 1, -1);
  for (int i = 0; i < axis; ++i) {
    ret[i] = (unknown_ishape? -1 : ishape[i]);
  }
  ret[axis] = 1;
  for (int i = axis+1; i < indim+1; ++i) {
    ret[i] = (unknown_ishape?
-1 : ishape[i-1]); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); ret = mxnet::TShape(indim, -1); for (int i = 0; i < axis; ++i) ret[i] = oshape[i]; for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i]; SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret); return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0)); } // Currently MKLDNN only supports step = 1 or step has no value inline bool SupportMKLDNNSlice(const SliceParam& param) { if (param.step.ndim() == 0U) return true; for (int i = 0; i < param.step.ndim(); ++i) { if (param.step[i].has_value() && param.step[i].value() != 1) return false; } return true; } inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); const auto& in_stype = in_attrs->at(0); auto& out_stype = out_attrs->at(0); bool dispatched = false; const auto dispatch_ex = DispatchMode::kFComputeEx; // If step = 1, no need to fallback; otherwise fallback to dense bool trivial_step = false; if (param.step.ndim() == 0U) { trivial_step = true; } else if (param.step.ndim() == 1U && (!param.step[0].has_value() || param.step[0].value() == 1)) { trivial_step = true; } if (in_stype == kDefaultStorage) { #if MXNET_USE_MKLDNN == 1 if (dev_mask == Context::kCPU && MKLDNNEnvSet() && SupportMKLDNNSlice(param)) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex); } #endif if (!dispatched) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } } if (!dispatched && in_stype == kCSRStorage && trivial_step) { dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } // slice the indptr of a csr 
// Kernel: rebase a sliced indptr segment so it starts at zero.
struct SliceCsrIndPtr {
  template<typename IType>
  MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
    KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
  }
};

/*
 * a wrapper to launch SliceCsrIndPtr kernel.
 * slice [src[begin] .. src[end]) and store in dst[0, end - begin)
 */
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
                        const IType* src, IType* dst) {
  using namespace mshadow;
  using namespace mxnet_op;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  int indptr_len = end - begin + 1;
  // src + begin serves both as the data to copy and as the base to subtract.
  Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}

/*
 * Slice a CSR NDArray for first dimension
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end,
                        const OpContext& ctx, const NDArray &in, const NDArray &out) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace csr;
  nnvm::dim_t begin_row = begin[0];
  nnvm::dim_t end_row = end[0];
  nnvm::dim_t indptr_len = end_row - begin_row + 1;
  out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
  // assume idx indptr share the same type
  MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
    MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
      MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
        RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
        RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
        SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx,
                                       in_indptr, out_indptr);
        Stream<xpu> *s = ctx.get_stream<xpu>();
        // nnz of the slice is the last entry of the rebased indptr.
        RType nnz = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
                      Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
        // return csr zeros if nnz = 0
        if (nnz == 0) {
          out.set_aux_shape(kIdx, Shape1(0));
          return;
        }
        // copy indices and values
        out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
        out.CheckAndAllocData(Shape1(nnz));
        IType* in_idx = in.aux_data(kIdx).dptr<IType>();
        IType* out_idx = out.aux_data(kIdx).dptr<IType>();
        DType* in_data = in.data().dptr<DType>();
        DType* out_data = out.data().dptr<DType>();
        // Starting offset of the slice within the input idx/data arrays.
        RType offset = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
                      Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
        mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
                      Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
        mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
                      Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
      });
    });
  });
}

/*!
 * \brief slice a CSRNDArray for two dimensions
 */
struct SliceDimTwoCsrAssign {
  /*!
   * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
   * \param i loop index
   * \param out_idx output csr ndarray column indices
   * \param out_data output csr ndarray data
   * \param out_indptr output csr ndarray row index pointer
   * \param in_idx input csr ndarray column indices
   * \param in_data input csr ndarray data
   * \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column indice
   * \param end_col end column indice
   */
  template<typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i, IType* out_idx, DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx, const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col, const int end_col) {
    RType ind = out_indptr[i];
    for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
      // indices of CSRNDArray are in ascending order per row
      if (in_idx[j] >= end_col) {
        break;
      } else if (in_idx[j] >= begin_col) {
        out_idx[ind] = in_idx[j] - begin_col;
        out_data[ind] = in_data[j];
        ind++;
      }
    }
  }
};

/*
 * Slice a CSR NDArray for two dimensions
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end,
                        const OpContext& ctx, const NDArray &in, const NDArray &out);

// Slice for CSR input: resolves begin/end per axis and dispatches to the
// 1-D or 2-D CSR implementation. (Body continues in the next chunk.)
template<typename xpu>
void SliceCsrImpl(const SliceParam &param, const OpContext& ctx,
                  const NDArray &in, OpReqType req, const NDArray &out) {
  if (req == kNullOp) return;
  CHECK_NE(req, kAddTo) <<
"kAddTo for Slice on CSR input is not supported"; CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported"; const mxnet::TShape ishape = in.shape(); const mxnet::TShape oshape = out.shape(); int N = ishape.ndim(); mxnet::TShape begin(N, -1), end(N, -1); for (int i = 0; i < N; ++i) { int s = 0; if (i < param.begin.ndim() && param.begin[i]) { s = *param.begin[i]; if (s < 0) s += ishape[i]; } begin[i] = s; end[i] = s + oshape[i]; } switch (N) { case 1: { SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out); break; } case 2: { SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out); break; } default: LOG(FATAL) << "CSR is only for 2-D shape"; break; } } template<typename xpu> void SliceEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { CHECK_EQ(inputs.size(), 1); CHECK_EQ(outputs.size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); auto in_stype = inputs[0].storage_type(); if (in_stype == kCSRStorage) { SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]); } else { LOG(FATAL) << "Slice not implemented for storage type" << in_stype; } } template<int ndim> inline bool GetIndexRange(const mxnet::TShape& dshape, const mxnet::Tuple<dmlc::optional<index_t>>& param_begin, const mxnet::Tuple<dmlc::optional<index_t>>& param_end, const mxnet::Tuple<dmlc::optional<index_t>>& param_step, common::StaticArray<index_t, ndim>* begin, common::StaticArray<index_t, ndim>* end, common::StaticArray<index_t, ndim>* step) { // Function returns false if output is zero-sized, true otherwise. 
bool zero_size_shape = false; CHECK_NE(dshape.ndim(), 0U); CHECK_LE(param_begin.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_LE(param_end.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_EQ(param_begin.ndim(), param_end.ndim()) << "begin and end must have the same length"; CHECK_EQ(ndim, dshape.ndim()) << "Static array size=" << ndim << " is not equal to data shape ndim=" << dshape.ndim(); if (param_step.ndim() > 0) { CHECK_EQ(param_step.ndim(), param_begin.ndim()) << "step and begin must have the same length"; } for (int i = 0; i < param_begin.ndim(); ++i) { index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1; CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0"; index_t b = 0, e = 0; const index_t len = dshape[i]; if (len > 0) { b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0); e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len); if (b < 0) { b += len; } if (e < 0 && param_end[i].has_value()) { e += len; } // move the begin and end to correct position for calculating dim size b = (b < 0 && s > 0) ? 0 : b; b = (b > len - 1 && s < 0) ? len - 1 : b; // if the start value lead to empty tensor under step s, use -1 for indication b = (b < 0 || b > len - 1) ? -1 : b; e = e > -1 ? e : -1; e = e > len ? len : e; } else if (len == 0) { b = 0; e = 0; } (*begin)[i] = b; (*end)[i] = e; (*step)[i] = s; // checking begin==end if (b == e) { zero_size_shape = true; } } for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) { (*begin)[i] = 0; (*end)[i] = dshape[i]; (*step)[i] = 1; } return zero_size_shape; } inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape, const index_t i, const index_t b, const index_t e, const index_t s, mxnet::TShape* oshape) { if (!mxnet::dim_size_is_known(dshape, i)) { (*oshape)[i] = -1; return; } if (e != b && b >= 0) { if (s > 0) { (*oshape)[i] = e > b ? 
(e - b - 1) / s + 1 : 0; } else { (*oshape)[i] = e < b ? (b - e - 1) / (-s) + 1 : 0; } } else { (*oshape)[i] = 0; } } inline bool SliceOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0"; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); mxnet::TShape oshape = dshape; MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const index_t b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape); } }) SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(dshape) && shape_is_known(oshape); } template<int ndim, int req, typename xpu> struct slice_forward; template<int ndim, int req> struct slice_forward<ndim, req, gpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, const mshadow::Shape<ndim> dshape, const mshadow::Shape<ndim> oshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = dshape[ndim-1]; const index_t out_last_dim_size = oshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; const index_t j = i % out_last_dim_size; index_t irow = 0; // row id of flattend 2D data index_t stride = 1; index_t idx = i / out_last_dim_size; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % oshape[k]) * step[k] + begin[k]); idx /= oshape[k]; stride *= dshape[k]; } KERNEL_ASSIGN(out[i], req, data[irow * data_last_dim_size + j * 
step_last_dim + begin_last_dim]); } }; template<int ndim, int req> struct slice_forward<ndim, req, cpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, const mshadow::Shape<ndim> dshape, const mshadow::Shape<ndim> oshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = dshape[ndim-1]; const index_t out_last_dim_size = oshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; index_t out_offset = i * out_last_dim_size; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D data index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % oshape[k]) * step[k] + begin[k]); idx /= oshape[k]; stride *= dshape[k]; } KERNEL_ASSIGN(out[out_offset++], req, data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]); } } }; template<typename xpu> void SliceOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); if (req[0] == kNullOp) return; using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; if (out.Size() == 0) return; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { size_t num_threads = out.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= 
out.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), data.dptr<DType>(), data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step); }) }) }) } template<int ndim, int req, typename xpu> struct slice_assign; template<int ndim, int req> struct slice_assign<ndim, req, cpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; index_t offset = i * out_last_dim_size; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[offset++]); } } }; template<int ndim, int req> struct slice_assign<ndim, req, gpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; const index_t j = i % out_last_dim_size; index_t irow = 0; // row id of flattend 2D out index_t stride = 1; 
index_t idx = i / out_last_dim_size; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[i]); } }; template<typename xpu> void SliceOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); if (req[0] == kNullOp) return; using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); const TBlob& ograd = inputs[0]; const TBlob& igrad = outputs[0]; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); if (req[0] == kWriteTo) { Fill(s, igrad, req[0], 0); } else if (req[0] == kWriteInplace) { LOG(FATAL) << "_slice_backward does not support kWriteInplace"; } if (ograd.Size() == 0) return; MXNET_NDIM_SWITCH(ograd.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 2U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape vshape = dshape; // vshape is the value shape on the right hand side const SliceParam& param = 
nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const int b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape); } }) SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<typename xpu> void SliceAssignOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 2U); // data[index] = val, data and val are two inputs CHECK_EQ(outputs.size(), 1U); if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& val = inputs[1]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace"; } const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of zero-sized subspace needs no operation. 
} MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = val.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= val.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), val.dptr<DType>(), out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step); }) }) }) } struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> { double scalar; mxnet::Tuple<dmlc::optional<index_t>> begin, end; mxnet::Tuple<dmlc::optional<index_t>> step; DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) { DMLC_DECLARE_FIELD(scalar) .set_default(0) .describe("The scalar value for assignment."); DMLC_DECLARE_FIELD(begin) .describe("starting indices for the slice operation, supports negative indices."); DMLC_DECLARE_FIELD(end) .describe("ending indices for the slice operation, supports negative indices."); DMLC_DECLARE_FIELD(step) .set_default(mxnet::Tuple<dmlc::optional<index_t>>()) .describe("step for the slice operation, supports negative values."); } }; inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!shape_is_known(dshape)) return false; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<int ndim> struct slice_assign_scalar { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val, const OpReqType req, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t 
begin_last_dim = begin[ndim-1]; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val); } } }; template<typename xpu> void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); using namespace mshadow; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace"; } mxnet::TShape vshape = data.shape_; const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of zero-sized subspaced needs no operation. 
}
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      const int b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}

struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;
  index_t begin;
  dmlc::optional<index_t> end;
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
    .describe("The beginning index along the axis to be sliced, "
              " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
    .describe("The ending index along the axis to be sliced, "
              " supports negative indexes.");
  }
};

// Resolves slice_axis parameters into a non-negative axis and [begin, end)
// range, normalizing negative indices and validating bounds.
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();
  }
  CHECK(*axis < ishape.ndim() && *axis >= 0)
      << "Transformed axis must be smaller than the source ndim and larger than zero! Recieved axis="
      << param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    // Missing `end` means "to the end of the axis".
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size)
        << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
        << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}

// Shape inference for slice_axis: output matches the input except on the
// sliced axis, which becomes end - begin.
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape shape(ishape.ndim(), -1);
  for (int i = 0; i < ishape.ndim(); ++i) {
    if (i == axis) {
      shape[i] = static_cast<index_t>(end - begin);
    } else {
      shape[i] = ishape[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return shape_is_known(shape);
}

// Forward pass of slice_axis: flattens so the sliced axis is the middle (or
// last) dimension and delegates to mshadow's slice expression.
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin,
&end); int ndim = outputs[0].ndim(); if (axis + 1 == ndim) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 2, DType> in = inputs[0].FlatTo2D<xpu, DType>(s); mshadow::Tensor<xpu, 2, DType> out = outputs[0].FlatTo2D<xpu, DType>(s); ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end)); }); } else { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 3, DType> in = inputs[0].FlatTo3D<xpu, DType>(axis, s); mshadow::Tensor<xpu, 3, DType> out = outputs[0].FlatTo3D<xpu, DType>(axis, s); ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end)); }); } } // Backward pass of broadcast over the given axis template<typename xpu> void SliceAxisGrad_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { if (outputs[0].shape_.Size() == 0) { return; } const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed); using namespace mshadow::op; using namespace mshadow::expr; mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); int axis; index_t begin, end; GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end); int ndim = outputs[0].shape_.ndim(); if (axis + 1 == ndim) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 2, DType> ograd = inputs[0].FlatTo2D<xpu, DType>(s); mshadow::Tensor<xpu, 2, DType> igrad = outputs[0].FlatTo2D<xpu, DType>(s); if (req[0] == kAddTo) { slice<1>(igrad, begin, end) += F<identity>(ograd); } else if (req[0] == kWriteTo) { igrad = 0.0f; slice<1>(igrad, begin, end) = F<identity>(ograd); } else { CHECK_EQ(req[0], kNullOp); } }); } else { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 3, DType> ograd = inputs[0].FlatTo3D<xpu, DType>(axis, s); mshadow::Tensor<xpu, 3, DType> igrad = outputs[0].FlatTo3D<xpu, DType>(axis, s); if (req[0] == kAddTo) { slice<1>(igrad, begin, end) += F<identity>(ograd); } else if (req[0] == kWriteTo) { igrad = 0.0f; 
slice<1>(igrad, begin, end) = F<identity>(ograd); } else { CHECK_EQ(req[0], kNullOp); } }); } } struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> { mxnet::Tuple<int> axes; DMLC_DECLARE_PARAMETER(SliceLikeParam) { DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>()) .describe("List of axes on which input data will be sliced according to the " "corresponding size of the second input. By default will slice on " "all axes. Negative axes are supported."); } }; inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 2U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& ishape = (*in_attrs)[0]; mxnet::TShape& from_shape = (*in_attrs)[1]; if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) { return false; } if (param.axes.ndim() == 0) { CHECK_EQ(ishape.ndim(), from_shape.ndim()) << "By default slice_axis performs slice on all axes, but ndim mismatch " "for inputs: " << ishape.ndim() << " vs. 
" << from_shape.ndim(); for (int i = 0; i < ishape.ndim(); ++i) { CHECK_GE(ishape[i], from_shape[i]) << "Slice axis " << i << " with size " << from_shape[i] << "exceeds limit of input with size " << ishape[i]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape); } else { mxnet::TShape shape(ishape); for (int i = 0; i < param.axes.ndim(); ++i) { int axis = param.axes[i]; if (axis < 0) { axis += ishape.ndim(); } CHECK_GE(axis, 0) << "Slice axis: " << param.axes[i] << " too small"; CHECK_GT(ishape.ndim(), axis) << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim(); CHECK_GT(from_shape.ndim(), axis) << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim(); shape[axis] = from_shape[axis]; CHECK_GE(ishape[axis], from_shape[axis]) << "Slice axis " << axis << " with size " << from_shape[axis] << "exceeds limit of input with size " << ishape[axis]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } return true; } inline void SliceLikeInferRanges(const mxnet::TShape& dshape, const mxnet::TShape& fshape, const mxnet::Tuple<int>& axes, mxnet::Tuple<dmlc::optional<index_t>>* param_begin, mxnet::Tuple<dmlc::optional<index_t>>* param_end, mxnet::Tuple<dmlc::optional<index_t>>* param_step) { std::vector<dmlc::optional<index_t>> pb(dshape.ndim()); std::vector<dmlc::optional<index_t>> pe(dshape.ndim()); std::vector<dmlc::optional<index_t>> ps(dshape.ndim()); if (axes.ndim() == 0) { for (int i = 0; i < dshape.ndim(); ++i) { pb[i] = 0; pe[i] = fshape[i]; ps[i] = 1; } } else { for (int i = 0; i < axes.ndim(); ++i) { int axis = axes[i]; if (axis < 0) { axis += dshape.ndim(); } CHECK_GE(axis, 0) << "Slice axis: " << axes[i] << " too small"; CHECK_LT(axis, dshape.ndim()) << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim(); CHECK_LT(axis, fshape.ndim()) << "Slice axis: " << axis << " exceeds first input: " << fshape.ndim(); pb[axis] = 0; pe[axis] = fshape[axis]; ps[axis] = 1; } } *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), 
pb.end()); *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end()); *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end()); } template<typename xpu> void SliceLikeForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); using namespace mshadow::expr; const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; const mxnet::TShape& ishape = data.shape_; const mxnet::TShape& from_shape = inputs[1].shape_; mxnet::Tuple<dmlc::optional<index_t>> param_begin; mxnet::Tuple<dmlc::optional<index_t>> param_end; mxnet::Tuple<dmlc::optional<index_t>> param_step; SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = out.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= out.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), data.dptr<DType>(), data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step); }) }) }) } template<typename xpu> void SliceLikeBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U); CHECK_EQ(req.size(), 2U); using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); if (req[1] != kNullOp && req[1] != kAddTo) { Fill(s, 
outputs[1], req[1], 0); // Second input not relavant to gradients. } if (req[0] == kNullOp) return; const TBlob& ograd = inputs[0]; const TBlob& igrad = outputs[0]; const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); if (req[0] == kWriteTo) { Fill(s, igrad, req[0], 0); } else if (req[0] == kWriteInplace) { LOG(FATAL) << "_slice_like_backward does not support kWriteInplace"; } const mxnet::TShape& ishape = ograd.shape_; const mxnet::TShape& from_shape = outputs[1].shape_; mxnet::Tuple<dmlc::optional<index_t>> param_begin; mxnet::Tuple<dmlc::optional<index_t>> param_end; mxnet::Tuple<dmlc::optional<index_t>> param_step; SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step); MXNET_NDIM_SWITCH(ograd.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } struct ClipParam : public dmlc::Parameter<ClipParam> { real_t a_min, a_max; DMLC_DECLARE_PARAMETER(ClipParam) { DMLC_DECLARE_FIELD(a_min) .describe("Minimum value"); DMLC_DECLARE_FIELD(a_max) .describe("Maximum value"); } }; struct clip { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas, const float a_min, const float a_max) { DType data = datas[i]; if (data > a_max) { out[i] = a_max; } else if (data < a_min) { out[i] = a_min; } else { out[i] = data; } } }; struct clip_grad { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas, const float 
a_min, const float a_max) { DType data = datas[i]; if (data > a_max) { out[i] = 0; } else if (data < a_min) { out[i] = 0; } else { out[i] = grad[i]; } } }; template<typename xpu> void Clip(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), param.a_min, param.a_max); }); } template<typename xpu> void ClipEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { CHECK_EQ(inputs[0].dtype(), outputs[0].dtype()); CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type()); CHECK_NE(inputs[0].storage_type(), kDefaultStorage); UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>); } template<typename xpu> void ClipGrad_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max); }); } /*! * \brief The parameters of the repeat operator include * the number of repeating time and axis (optional). 
* The parameters will be later used to deduce the * output ndarray shape in bool RepeatShape() function. */ struct RepeatParam : public dmlc::Parameter<RepeatParam> { int repeats = 1; dmlc::optional<int> axis; DMLC_DECLARE_PARAMETER(RepeatParam) { DMLC_DECLARE_FIELD(repeats) .describe("The number of repetitions for each element."); DMLC_DECLARE_FIELD(axis) .set_default(dmlc::optional<int>()) .describe("The axis along which to repeat values." " The negative numbers are interpreted counting from the backward." " By default, use the flattened input array," " and return a flat output array."); } }; /*! * \brief Helper function for getting user input params for the operator repeat. * Sanity check the user input values. */ inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape, int* repeats, dmlc::optional<int>* axisOpt) { *repeats = param.repeats; CHECK_GE(*repeats, 0) << "repeats cannot be a negative number"; *axisOpt = param.axis; if (static_cast<bool>(*axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt->value(); if (axis < 0) { axis += ndims; } CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds"; } } inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& ishape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(ishape)) { return false; } int repeats = 0; dmlc::optional<int> axisOpt; GetRepeatParams(param, ishape, &repeats, &axisOpt); // If 0 repeats, return an empty 1-dim, 0-size array if (0 == repeats) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0)); return true; } // If repeats > 0, multiply the size of the corresponding axis by repeats if (static_cast<bool>(axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt.value(); if (axis < 0) { axis += ndims; } mxnet::TShape 
shape(ishape.ndim(), -1); for (int i = 0; i < ishape.ndim(); ++i) { if (i == axis) { shape[i] = repeats * ishape[i]; } else { shape[i] = ishape[i]; } } SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } else { // If axis is not input by user, return a flat 1D array of size = in.size*repeats mxnet::TShape shape(1, ishape.Size() * repeats); SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } return shape_is_known(out_attrs->at(0)); } inline bool RepeatOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if ((*in_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]); } else if ((*out_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]); } return true; } /*! * \brief Reshape the input and output tensors for * using broadcast_to to achieve the funcitonality * of operator repeat. * \return a pair of mxnet::TShape's, first is the reshaped * input shape, second is the reshaped output shape. */ inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp( const mxnet::TShape& ishape, const dmlc::optional<int>& axisOpt, const int repeats) { if (static_cast<bool>(axisOpt)) { int axis = axisOpt.value(); int ndim = ishape.ndim(); if (axis < 0) { axis += ndim; } CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis"; // reshape the input tensor by adding a dim at the (axis+1)-th dim mxnet::TShape rshape(ishape.ndim()+1, 1); // the shape we want to broadcast to mxnet::TShape bshape(rshape.ndim(), 1); int i = 0; while (i <= axis) { rshape[i] = bshape[i] = ishape[i]; ++i; } rshape[i] = 1; bshape[i] = repeats; while (i < ishape.ndim()) { rshape[i+1] = ishape[i]; bshape[i+1] = ishape[i]; ++i; } return std::make_pair(rshape, bshape); } else { // axis is not input by user // reshape the tensor into shape (ishape.Size(), 1) // then add one dim at axis = 1 and broadcast to // shape (ishape.Size(), repeats) mxnet::TShape rshape(2, 1); rshape[0] = ishape.Size(); rshape[1] = 
1; mxnet::TShape bshape(2, 1); bshape[0] = rshape[0]; bshape[1] = repeats; return std::make_pair(rshape, bshape); } } template<typename xpu> void RepeatOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TBlob& iTBlob = inputs[0]; const mxnet::TShape& ishape = iTBlob.shape_; if (!shape_is_known(ishape)) return; int repeats = 0; dmlc::optional<int> axisOpt; const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); GetRepeatParams(param, ishape, &repeats, &axisOpt); if (0 == repeats) return; std::pair<mxnet::TShape, mxnet::TShape> rshapes = \ ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
* \param inputs the gradient of the loss function * with respect to the outputs of the operator * \param outputs the gradient of the loss function * with respect to the inputs of the operator */ template<typename xpu> void RepeatOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); const mxnet::TShape& oshape = outputs[0].shape_; if (!shape_is_known(oshape)) return; int repeats = 0; dmlc::optional<int> axisOpt; const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); GetRepeatParams(param, oshape, &repeats, &axisOpt); if (0 == repeats) return; std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats); // reshaped output grad tblob TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; // reshaped input grad tblob TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; if constexpr (std::is_same<xpu, mshadow::cpu>::value) { ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>( ctx, newInputs, req, newOutputs, rshapes.first); } else { #if MXNET_USE_CUDA ReduceAxesRTCComputeImpl(ctx, newInputs, req, newOutputs, rshapes.first, "red::sum{}", false); #endif } } struct TileParam : public dmlc::Parameter<TileParam> { mxnet::Tuple<int> reps; DMLC_DECLARE_PARAMETER(TileParam) { DMLC_DECLARE_FIELD(reps) .describe("The number of times for repeating the tensor a. Each dim size of reps" " must be a positive integer." " If reps has length d, the result will have dimension of max(d, a.ndim);" " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes." 
" If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it."); } }; inline bool TileOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const TileParam& param = nnvm::get<TileParam>(attrs.parsed); const mxnet::TShape& ishape = (*in_attrs)[0]; if (!shape_is_known(ishape)) { return false; } const mxnet::Tuple<int>& reps = param.reps; // If reps is empty, return a identical input array if (reps.ndim() == 0) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape); return true; } mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = oshape.ndim() - 1; i >= 0; --i) { if (i1 >= 0 && i2 >= 0) { oshape[i] = ishape[i1--] * reps[i2--]; } else if (i1 >= 0) { oshape[i] = ishape[i1--]; } else if (i2 >= 0) { oshape[i] = reps[i2--]; } } // If reps contains 0s, oshape is a zero-size shape. // Need to distinguish between np_shape mode and legacy mode. if (!Imperative::Get()->is_np_shape()) { common::ConvertToNumpyShape(&oshape); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(oshape); } inline bool TileOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if ((*in_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]); } else if ((*out_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]); } return true; } /*! * \brief Reshape the input and output tensors for * using broadcast_to to achieve the functionality * of operator tile. * \return a pair of mxnet::TShape's, first is the reshaped * input shape, second is the reshaped output shape. 
*/ inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp( const mxnet::TShape& ishape, const mxnet::Tuple<int>& reps) { if (reps.ndim() == 0) { return std::make_pair(ishape, ishape); } // The shape we want to broadcast to mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1); // The shape of the input tensor after adding new axes before each dim mxnet::TShape rshape(bshape.ndim(), 1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = bshape.ndim() - 1; i >= 0; --i) { if (0 == (i & 1)) { bshape[i] = (i2 >= 0? reps[i2--] : 1); rshape[i] = 1; } else { rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1); } } return std::make_pair(rshape, bshape); } /*! * \brief Implementation of tiling the input tensor a based * on the user-input shape, reps. * If a.ndim < reps.ndim, new axes are pre-pended to a. For example, * the input tensor has shape (3,), and the reps is (2, 4); the input * tensor would be reshaped to (1, 3). * If a.ndim > reps.ndim, pre-pending 1's to reps. For example, * the input tensor has shape (2, 3, 4, 5), and reps is (2, 2); * the reps would be changed to (1, 1, 2, 2). * Suppose we have a.ndim = reps.ndim now. To achieve tiling, * we utilize the operator broadcast_to. For example, for a tensor * of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape * the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding * one axis before each dimension. Then, we want to broadcast * the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final * output tensor would have shape (2*2, 8*3, 9*4, 3*5). 
*/ template<typename xpu> void TileOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& ishape = inputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
* \param inputs the gradient of the loss function * with respect to the outputs of the operator * \param outputs the gradient of the loss function * with respect to the inputs of the operator */ template<typename xpu> void TileOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& oshape = outputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps); // reshaped output grad tblob TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; // reshaped input grad tblob TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; if constexpr (std::is_same<xpu, mshadow::cpu>::value) { ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>( ctx, newInputs, req, newOutputs, rshapes.first); } else { #if MXNET_USE_CUDA ReduceAxesRTCComputeImpl(ctx, newInputs, req, newOutputs, rshapes.first, "red::sum{}", false); #endif } } struct ReverseParam : public dmlc::Parameter<ReverseParam> { mxnet::Tuple<int> axis; DMLC_DECLARE_PARAMETER(ReverseParam) { DMLC_DECLARE_FIELD(axis) .describe("The axis which to reverse elements."); } }; #define REVERSE_MAX_DIM 10U struct reverse { MSHADOW_XINLINE static index_t ReverseIndex(index_t idx, index_t nreversedim, const index_t * stride_, const index_t * trailing_) { index_t outputIndex = idx; for (index_t i = 0; i < nreversedim; ++i) { const index_t low = outputIndex % trailing_[i]; 
index_t high = outputIndex / trailing_[i]; const index_t x = high%stride_[i]; high /= stride_[i]; outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low; } return outputIndex; } #ifdef __CUDACC__ template<typename DType> __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst, const index_t * stride_, const index_t * trailing_) { __shared__ index_t stride_share[REVERSE_MAX_DIM]; __shared__ index_t trailing_share[REVERSE_MAX_DIM]; if (threadIdx.x < REVERSE_MAX_DIM) { stride_share[threadIdx.x] = stride_[threadIdx.x]; trailing_share[threadIdx.x] = trailing_[threadIdx.x]; } __syncthreads(); index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share); dst[new_idx] = src[index]; } #else template<typename DType> MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst, const index_t * stride_, const index_t * trailing_) { index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_); dst[new_idx] = src[index]; } #endif }; template<typename xpu> void ReverseOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM); Stream<xpu> *s = ctx.get_stream<xpu>(); const mxnet::TShape& ishape = inputs[0].shape_; std::vector<index_t> stride_(param.axis.ndim()); std::vector<index_t> trailing_(param.axis.ndim()); index_t reverse_index = 0; for (int axis : param.axis) { CHECK_LT(axis, ishape.ndim()); stride_[reverse_index] = ishape[axis]; trailing_[reverse_index] = 1; for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) { trailing_[reverse_index] *= ishape[i2]; } reverse_index++; } #ifdef __CUDACC__ auto stride_workspace = AllocMemory<xpu, 
index_t>(ctx, 2 * reverse_index); auto trailing_workspace = stride_workspace + reverse_index * sizeof(index_t); cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()), stride_.size() * sizeof(index_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()), trailing_.size() * sizeof(index_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); #endif #ifdef __CUDACC__ MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index, inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace)); }); #else MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index, inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), stride_.data(), trailing_.data()); }); #endif } struct StackParam : public dmlc::Parameter<StackParam> { int axis; int num_args; DMLC_DECLARE_PARAMETER(StackParam) { DMLC_DECLARE_FIELD(axis) .set_default(0) .describe("The axis in the result array along which the input arrays are stacked."); DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) .describe("Number of inputs to be stacked."); } }; inline bool StackOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const StackParam& param = dmlc::get<StackParam>(attrs.parsed); mxnet::TShape dshape; for (const mxnet::TShape& i : (*in_attrs)) { shape_assign(&dshape, i); } if (!shape_is_known(dshape)) return false; mxnet::TShape oshape(dshape.ndim() + 1, -1); int axis = CheckAxis(param.axis, oshape.ndim()); for (int i = 0; i < axis; ++i) { oshape[i] = dshape[i]; } oshape[axis] = param.num_args; for (index_t i = axis + 1; i < oshape.ndim(); ++i) { oshape[i] = dshape[i-1]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(oshape); } template<typename xpu> 
void StackOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const StackParam& param = dmlc::get<StackParam>(attrs.parsed); int axis = CheckAxis(param.axis, outputs[0].ndim()); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, { std::vector<Tensor<xpu, 3, DType> > data(inputs.size()); Tensor<xpu, 3, DType> out; size_t leading = 1, trailing = 1; for (int i = 0; i < axis; ++i) { leading *= outputs[0].shape_[i]; } for (int i = axis + 1; i < outputs[0].ndim(); ++i) { trailing *= outputs[0].shape_[i]; } size_t mid = outputs[0].shape_[axis]; Shape<3> oshape = Shape3(leading, mid, trailing); out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s); for (size_t i = 0; i < inputs.size(); ++i) { Shape<3> dshape = Shape3(leading, 1, trailing); data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s); } Concatenate(data, &out, 1, req[0]); }) } template<typename xpu> void StackOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const StackParam& param = dmlc::get<StackParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size()); Tensor<xpu, 3, DType> grad; size_t leading = 1, trailing = 1; for (int i = 0; i < axis; ++i) { leading *= inputs[0].shape_[i]; } for (int i = axis + 1; i < inputs[0].ndim(); ++i) { trailing *= inputs[0].shape_[i]; } size_t mid = inputs[0].shape_[axis]; Shape<3> oshape = Shape3(leading, mid, trailing); grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s); for (size_t i = 0; i < 
outputs.size(); ++i) { Shape<3> dshape = Shape3(leading, 1, trailing); grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s); } Split(grad, &grad_in, 1, req); }) } struct SqueezeParam : public dmlc::Parameter<SqueezeParam> { dmlc::optional<mxnet::Tuple<int>> axis; DMLC_DECLARE_PARAMETER(SqueezeParam) { DMLC_DECLARE_FIELD(axis) .set_default(dmlc::optional<mxnet::Tuple<int>>()) .describe("Selects a subset of the single-dimensional entries in the shape." " If an axis is selected with shape entry greater than one, an error is raised."); } }; // Given a shape that may have dim size equal to 0, // move all the zeros to the last of the shape array // and keep the relative order of the non-zero values. // Returns the new shape size after moving all zeros to the end. inline size_t SqueezeShapeHelper(mxnet::TShape* shape) { CHECK(shape != nullptr); size_t count = 0; for (int i = 0; i < shape->ndim(); ++i) { if ((*shape)[i] == -1) { ++count; } else { std::swap((*shape)[i], (*shape)[i-count]); } } return shape->ndim() - count; } inline bool SqueezeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = in_attrs->at(0); const int dndim = dshape.ndim(); if (!shape_is_known(dshape)) return false; mxnet::TShape oshape = dshape; if (param.axis.has_value()) { // preprocess axis mxnet::Tuple<int> axes = param.axis.value(); for (int i = 0; i < axes.ndim(); ++i) { if (axes[i] < 0) { axes[i] += dndim; CHECK_GE(axes[i], 0) << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim; } CHECK_LT(axes[i], dndim) << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim; CHECK_EQ(dshape[axes[i]], 1) << "cannot select an axis to squeeze out which has size=" << dshape[axes[i]] << " not equal to one"; 
CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis"; oshape[axes[i]] = -1; } } else { for (int i = 0; i < oshape.ndim(); ++i) { if (oshape[i] == 1) oshape[i] = -1; } } size_t oshape_size = SqueezeShapeHelper(&oshape); if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1) oshape[0] = 1; oshape_size = 1; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size)); return true; } struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> { int block_size; DMLC_DECLARE_PARAMETER(DepthToSpaceParam) { DMLC_DECLARE_FIELD(block_size) .describe("Blocks of [block_size. block_size] are moved"); } }; inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor"; mxnet::TShape expected_out(4, -1); mxnet::TShape& in_shape = in_attrs->at(0); if (!mxnet::ndim_is_known(in_shape)) { return false; } int block = param.block_size; CHECK_NE(block, 0) << "block_size must be a positive integer value"; CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0"; CHECK_EQ(in_shape[1] % (block * block), 0) << "Cannot perform Depth To Space operation on the specified tensor." " Dimension:1(depth dimension) should be a multiple of 'block^2'"; CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0"; CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0"; CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. 
Size of dimension:3 cannot be 0";
  // Output shape: depth shrinks by block^2, both spatial dimensions grow by block.
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] * block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}

/*!
 * \brief Type inference for depth_to_space: input and output share a single
 *        dtype, propagated in both directions; succeeds once it is known
 *        (-1 means still undefined).
 */
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}

/*!
 * \brief This function updates the value of input index from where the data element
 * needs to be fetched and written out to the ith location in output tensor
 * \param index_position index within offset array to get offset of given dimension
 * \param dim_size size of current dimension
 * \param idx output tensor index
 * \param inp_index index within input tensor from where value is retrieved
 * \param offset_arr array containing the linear offset of input tensor
 */
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size,
                                  index_t *idx, index_t *inp_index,
                                  const index_t* offset_arr) {
  // Peel off the least-significant "digit" of *idx in base dim_size and
  // advance *inp_index by that digit times the stride of this dimension.
  index_t next_idx_val = *idx / dim_size;
  *inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position];
  *idx = next_idx_val;
}

/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) -> * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped * to the ith index of output tensor * \param i tensor index * \param out_data output tensor * \param in_data input tensor * \param block size of chunks to be moved out of depth dimension * \param size array containing the size of each dimension of input tensor * \param offset_arr array containing the linear offset of input tensor */ template<int req> struct depth_to_space_forward { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block, const index_t* size, const index_t* offset_arr) { index_t inp_index = 0, idx = i, dim_size; dim_size = block; update_index(2, dim_size, &idx, &inp_index, offset_arr); dim_size = size[3]; update_index(5, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(1, dim_size, &idx, &inp_index, offset_arr); dim_size = size[2]; update_index(4, dim_size, &idx, &inp_index, offset_arr); dim_size = size[1] / (block * block); update_index(3, dim_size, &idx, &inp_index, offset_arr); dim_size = size[0]; update_index(0, dim_size, &idx, &inp_index, offset_arr); KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]); } }; /*! 
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing depth_to_space operation
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_depth_to_space {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size,
                                  const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;

    // Linear strides of the six virtual dimensions consumed by
    // depth_to_space_forward; offset_arr[5] is the innermost (unit) stride.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};

/*! \brief Forward pass of depth_to_space; expects exactly one 4D input and one output. */
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;

  // Scratch workspace: 6 stride values + 4 size values = 10 index_t slots.
  mshadow::Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size =
reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6); MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch( s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1], in_data.shape_[2], in_data.shape_[3]); Kernel<depth_to_space_forward<req_type>, xpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(), block, size, offset_arr); }); }); } inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor"; mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1); mxnet::TShape& in_shape = in_attrs->at(0); if (!mxnet::ndim_is_known(in_shape)) { return false; } int block = param.block_size; CHECK_NE(block, 0) << "block_size must be a positive integer value"; CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0"; CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0"; CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0"; CHECK_EQ(in_shape[2] % block, 0) << "Cannot perform Depth To Space operation on the specified tensor." " Dimension:2(1st Space dimension) should be a multiple of 'block' "; CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0"; CHECK_EQ(in_shape[3] % block, 0) << "Cannot perform Depth To Space operation on the specified tensor." 
" Dimension:3(2nd space dimension) should be a multiple of 'block' "; expected_out[0] = in_shape[0]; expected_out[1] = in_shape[1] * block * block; int i = 2; while (i < expected_out.ndim()) { expected_out[i] = in_shape[i] / block; ++i; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out); return shape_is_known(expected_out); } inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } /*! * \brief This function preforms the tensor transpose (0, 1, 2, 3, 4, 5) -> * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped * to the ith index of output tensor * \param i tensor index * \param out_data output tensor * \param in_data input tensor * \param block size of chunks to be moved out of depth dimension * \param size array containing the size of each dimension of input tensor * \param offset_arr array containing the linear offset of input tensor */ template<int req> struct space_to_depth_forward { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block, const index_t* size, const index_t* offset_arr) { index_t inp_index = 0, idx = i, dim_size; dim_size = size[3] / block; update_index(4, dim_size, &idx, &inp_index, offset_arr); dim_size = size[2] / block; update_index(2, dim_size, &idx, &inp_index, offset_arr); dim_size = size[1]; update_index(1, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(5, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(3, dim_size, &idx, &inp_index, offset_arr); dim_size = size[0]; update_index(0, dim_size, &idx, &inp_index, offset_arr); KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]); } }; /*! 
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing space_to_depth operation
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size,
                                  const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;

    // Linear strides of the six virtual dimensions consumed by
    // space_to_depth_forward; offset_arr[5] is the innermost (unit) stride.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};

/*! \brief Forward pass of space_to_depth; expects exactly one 4D input and one output. */
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;

  // Scratch workspace: 6 stride values + 4 size values = 10 index_t slots.
  mshadow::Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size =
reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6); MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch( s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1], in_data.shape_[2], in_data.shape_[3]); Kernel<space_to_depth_forward<req_type>, xpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(), block, size, offset_arr); }); }); } namespace split_enum { enum SplitOpInputs {kData}; } // namespace split_enum struct SplitParam : public dmlc::Parameter<SplitParam> { mxnet::TShape indices; int axis; bool squeeze_axis; int sections; DMLC_DECLARE_PARAMETER(SplitParam) { DMLC_DECLARE_FIELD(indices) .describe("Indices of splits. The elements should denote the boundaries of at which split" " is performed along the `axis`."); DMLC_DECLARE_FIELD(axis).set_default(1) .describe("Axis along which to split."); DMLC_DECLARE_FIELD(squeeze_axis).set_default(0) .describe("If true, Removes the axis with length 1 from the shapes of the output arrays." " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1" " only along the `axis` which it is split." " Also `squeeze_axis` can be set to ``true``" " only if ``input.shape[axis] == num_outputs``."); DMLC_DECLARE_FIELD(sections).set_default(0) .describe("Number of sections if equally splitted. 
Default to 0 which means split by indices."); } }; // struct SplitParam inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) { mxnet::TShape indices(sections+1, -1); indices[0] = 0; int64_t section_size_b = (int64_t) (ishape[axis] / sections); int64_t section_size_a = section_size_b + 1; int section_a = ishape[axis] % sections; for (int i = 0; i < sections; ++i) { if ( i < section_a ) { indices[i+1] = section_size_a * (i + 1); } else { indices[i+1] = section_size_b + indices[i]; } } return indices; } inline bool SplitOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); int dtype = (*in_attrs)[0]; CHECK_NE(dtype, -1) << "First input must have specified type"; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); out_attrs->clear(); int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim(); for (int i = 0; i < num_outputs; ++i) { out_attrs->push_back(dtype); } return true; } inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs, const int real_axis) { using namespace mshadow; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); mxnet::TShape dshape = in_attrs->at(split_enum::kData); mxnet::TShape ishape = in_attrs->at(split_enum::kData); const mxnet::TShape indices = (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices; int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim(); // Pre-compute squeezed output shape for future usage mxnet::TShape squeezed_dshape = dshape; for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) { squeezed_dshape[d] = squeezed_dshape[d+1]; } squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]); // Assign shape to every output for (int i = 0; i < num_outputs; ++i) { int start = indices[i]; int end = (i < num_outputs - 1) ? 
indices[i + 1] : ishape[real_axis]; if (ishape[real_axis] == 0U) { end = start; } else { CHECK(start <= end) << "start " << start << " is not less than end " << end << "for subarray " << i; CHECK(end <= ishape[real_axis]) << "end " << end << " is no less than the size of the axis " << ishape[real_axis]; } dshape[real_axis] = (end - start); if (param.squeeze_axis) { CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start; SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape); } else { SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape); } } mxnet::TShape back_calculate_dshape = ishape; back_calculate_dshape[real_axis] = 0; for (int d = 0; d < real_axis; ++d) { back_calculate_dshape[d] = (*out_attrs)[0][d]; } if (param.squeeze_axis) { back_calculate_dshape[real_axis] = num_outputs; } else { for (int i = 0; i < num_outputs; ++i) { back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis]; } } for (int d = real_axis + 1; d < ishape.ndim(); ++d) { if (param.squeeze_axis) { back_calculate_dshape[d] = (*out_attrs)[0][d - 1]; } else { back_calculate_dshape[d] = (*out_attrs)[0][d]; } } SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape); return true; } inline bool SplitOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { using namespace mshadow; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); mxnet::TShape dshape = in_attrs->at(split_enum::kData); if (!mxnet::ndim_is_known(dshape)) return false; if (param.axis >= 0) { CHECK_LT(param.axis, dshape.ndim()); } else { CHECK_LT(param.axis + dshape.ndim(), dshape.ndim()); } int real_axis = param.axis; if (real_axis < 0) { real_axis += dshape.ndim(); } return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis); } struct SplitKernel { /*! 
* \brief Map function for forward split_v2 operator * \param i global thread id * \param in_data ptr to input buffer * \param out_data ptr to ptr of outputs buffer * \param indices ptr to indices buffer * \param num_sections # of sections after split * \param axis_size size of axis to be splitted on * \param trailing_size step size within the data buffer of the axis to be splitted on */ template<typename DType> static MSHADOW_XINLINE void Map(size_t i, const DType *in_data, DType** out_data, const size_t* indices, const size_t num_sections, const size_t axis_size, const size_t trailing_size) { size_t idx = i / trailing_size % axis_size; size_t target = 0; for (size_t section = 0; section < num_sections && indices[section] <= idx; target = section++) {} DType* target_data = out_data[target]; const size_t mid_idx = idx - indices[target]; const size_t head_idx = i / (trailing_size * axis_size); const size_t tail_idx = i % trailing_size; const size_t section_size = indices[target + 1] - indices[target]; const size_t target_idx = head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx; target_data[target_idx] = in_data[i]; } }; struct ConcatenateKernel { /*! 
* \brief Map function for backward split_v2 operator * \param i global thread id * \param out_grad ptr to ptr of out grads buffer * \param in_grad ptr to input grad buffer * \param indices ptr to indices buffer * \param num_sections # of sections after split * \param axis_size size of axis to be splitted on * \param trailing_size step size within the data buffer of the axis to be splitted on */ template<typename DType> static MSHADOW_XINLINE void Map(size_t i, DType** out_grad, DType* in_grad, const size_t* indices, const size_t num_sections, const size_t axis_size, const size_t trailing_size) { size_t idx = i / trailing_size % axis_size; size_t src = 0; for (size_t section = 0; section < num_sections && indices[section] <= idx; src = section++) {} DType* src_grad = out_grad[src]; const size_t mid_idx = idx - indices[src]; const size_t head_idx = i / (trailing_size * axis_size); const size_t tail_idx = i % trailing_size; const size_t section_size = indices[src + 1] - indices[src]; const size_t src_idx = head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx; in_grad[i] = src_grad[src_idx]; } }; template<typename xpu> inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs, const int real_axis) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& input_data = inputs[split_enum::kData]; size_t leading = 1, trailing = 1; CHECK_LT(real_axis, input_data.ndim()); size_t mid = input_data.shape_[real_axis]; for (int i = 0; i < real_axis; ++i) { leading *= input_data.shape_[i]; } for (int i = real_axis + 1; i < input_data.ndim(); ++i) { trailing *= input_data.shape_[i]; } size_t workspace_size = 0; const mxnet::TShape& ishape = input_data.shape_; const mxnet::TShape split_pts = 
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices; std::vector<size_t> indices; for (const auto& section : split_pts) { indices.push_back(section); } if (param.sections == 0) { indices.push_back(ishape[real_axis]); } workspace_size += indices.size() * sizeof(size_t); MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, { std::vector<DType*> output_data; for (const TBlob& data : outputs) { output_data.push_back(data.dptr<DType>()); } workspace_size += output_data.size() * sizeof(DType*); Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s); Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size())); Tensor<xpu, 1, size_t> indices_xpu_tensor( reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size())); Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size())); Tensor<xpu, 1, DType*> ptrs_xpu_tensor( reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)), Shape1(output_data.size())); mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s); mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s); Kernel<SplitKernel, xpu>::Launch( s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_, indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing); }); } template<typename xpu> inline void SplitOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), (param.sections > 0) ? 
param.sections : param.indices.ndim()); const TBlob& input_data = inputs[split_enum::kData]; int real_axis = param.axis; if (real_axis < 0) { real_axis += input_data.ndim(); } SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis); } template<typename xpu> inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs, const int real_axis) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); Stream<xpu> *s = ctx.get_stream<xpu>(); TBlob input_grad = outputs[split_enum::kData]; size_t leading = 1, trailing = 1; CHECK_LT(real_axis, input_grad.ndim()); size_t mid = input_grad.shape_[real_axis]; for (int i = 0; i < real_axis; ++i) { leading *= input_grad.shape_[i]; } for (int i = real_axis + 1; i < input_grad.ndim(); ++i) { trailing *= input_grad.shape_[i]; } size_t workspace_size = 0; const mxnet::TShape& ishape = input_grad.shape_; const mxnet::TShape split_pts = (param.sections > 0) ? 
GetSplitIndices(ishape, real_axis, param.sections) : param.indices; std::vector<size_t> indices; for (const auto& section : split_pts) { indices.push_back(section); } if (param.sections == 0) { indices.push_back(ishape[real_axis]); } workspace_size += indices.size() * sizeof(size_t); MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, { std::vector<DType*> out_grads; for (const TBlob& output_grad : inputs) { out_grads.push_back(output_grad.dptr<DType>()); } workspace_size += out_grads.size() * sizeof(DType*); Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s); Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size())); Tensor<xpu, 1, size_t> indices_xpu_tensor( reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size())); Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size())); Tensor<xpu, 1, DType*> ptrs_xpu_tensor( reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)), Shape1(inputs.size())); mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s); mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s); Kernel<ConcatenateKernel, xpu>::Launch( s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(), indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing); }); } template<typename xpu> inline void SplitOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(inputs.size(), (param.sections > 0) ? 
param.sections : param.indices.ndim()) << "out grad vector size mush match the output size"; CHECK_EQ(outputs.size(), 1U); int real_axis = param.axis; if (real_axis < 0) { real_axis += outputs[split_enum::kData].ndim(); } SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis); } inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) { const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); return (param.sections > 0) ? param.sections : param.indices.ndim(); } } // namespace op } // namespace mxnet namespace std { template<> struct hash<mxnet::op::TransposeParam> { size_t operator()(const mxnet::op::TransposeParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axes); return ret; } }; template<> struct hash<mxnet::op::ReshapeParam> { size_t operator()(const mxnet::op::ReshapeParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.target_shape); ret = dmlc::HashCombine(ret, val.keep_highest); ret = dmlc::HashCombine(ret, val.shape); ret = dmlc::HashCombine(ret, val.reverse); return ret; } }; template<> struct hash<mxnet::op::ExpandDimParam> { size_t operator()(const mxnet::op::ExpandDimParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); return ret; } }; } // namespace std #endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
_phonopy.c
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <stdio.h> #include <stddef.h> #include <math.h> #include <float.h> #include <numpy/arrayobject.h> #include <dynmat.h> #include <derivative_dynmat.h> #include <kgrid.h> #include <tetrahedron_method.h> #define KB 8.6173382568083159E-05 /* PHPYCONST is defined in dynmat.h */ /* Build dynamical matrix */ static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args); static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args); static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args); static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args); static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args); static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args); static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args); static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args); static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args); static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args); static PyObject * py_distribute_fc2(PyObject *self, PyObject *args); static PyObject * py_compute_permutation(PyObject *self, PyObject *args); static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args); static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args); static PyObject * py_thm_neighboring_grid_points(PyObject *self, PyObject *args); static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args); static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args); static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args); static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args); static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args); static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args); static void 
distribute_fc2(double (*fc2)[3][3], const int * atom_list, const int len_atom_list, PHPYCONST double (*r_carts)[3][3], const int * permutations, const int * map_atoms, const int * map_syms, const int num_rot, const int num_pos); static int compute_permutation(int * rot_atom, PHPYCONST double lat[3][3], PHPYCONST double (*pos)[3], PHPYCONST double (*rot_pos)[3], const int num_pos, const double symprec); static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3], int * multiplicity, PHPYCONST double (*vector_lists)[27][3], PHPYCONST double (*length_lists)[27], const int num_lists, const double symprec); static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3], int *multiplicity, PHPYCONST double (*pos_to)[3], const int num_pos_to, PHPYCONST double (*pos_from)[3], const int num_pos_from, PHPYCONST int lattice_points[27][3], PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3], const double symprec); static double get_free_energy(const double temperature, const double f); static double get_entropy(const double temperature, const double f); static double get_heat_capacity(const double temperature, const double f); static void set_index_permutation_symmetry_fc(double * fc, const int natom); static void set_translational_symmetry_fc(double * fc, const int natom); static void set_index_permutation_symmetry_compact_fc(double * fc, const int p2s[], const int s2pp[], const int nsym_list[], const int perms[], const int n_satom, const int n_patom, const int is_transpose); static void set_translational_symmetry_compact_fc(double * fc, const int p2s[], const int n_satom, const int n_patom); /* static double get_energy(double temperature, double f); */ static int nint(const double a); struct module_state { PyObject *error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif static PyObject * error_out(PyObject *m) { 
struct module_state *st = GETSTATE(m); PyErr_SetString(st->error, "something bad happened"); return NULL; } static PyMethodDef _phonopy_methods[] = { {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL}, {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS, "Transform a set of dynmat to force constants"}, {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS, "Enforce permutation and translational symmetry of force constants"}, {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc, METH_VARARGS, "Enforce permutation and translational symmetry of compact force constants"}, {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS, "Transpose compact force constants"}, {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS, "Dynamical matrix"}, {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS, "NAC dynamical matrix"}, {"dipole_dipole", py_get_dipole_dipole, METH_VARARGS, "Dipole-dipole interaction"}, {"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS, "q=0 terms of Dipole-dipole interaction"}, {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS, "Q derivative of dynamical matrix"}, {"thermal_properties", py_get_thermal_properties, METH_VARARGS, "Thermal properties"}, {"distribute_fc2", py_distribute_fc2, METH_VARARGS, "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."}, {"compute_permutation", py_compute_permutation, METH_VARARGS, "Compute indices of original points in a set of rotated points."}, {"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS, "Implementation detail of get_smallest_vectors."}, {"gsv_set_smallest_vectors", py_gsv_set_smallest_vectors, METH_VARARGS, "Set candidate vectors."}, {"neighboring_grid_points", py_thm_neighboring_grid_points, METH_VARARGS, "Neighboring grid points by relative grid addresses"}, {"tetrahedra_relative_grid_address", py_thm_relative_grid_address, METH_VARARGS, 
"Relative grid addresses of vertices of 24 tetrahedra"}, {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address, METH_VARARGS, "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"}, {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS, "Integration weight for tetrahedron method"}, {"tetrahedra_integration_weight_at_omegas", py_thm_integration_weight_at_omegas, METH_VARARGS, "Integration weight for tetrahedron method at omegas"}, {"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS, "Run tetrahedron method"}, {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS, "Run tetrahedron method"}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int _phonopy_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_phonopy", NULL, sizeof(struct module_state), _phonopy_methods, NULL, _phonopy_traverse, _phonopy_clear, NULL }; #define INITERROR return NULL PyObject * PyInit__phonopy(void) #else #define INITERROR return void init_phonopy(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); #else PyObject *module = Py_InitModule("_phonopy", _phonopy_methods); #endif struct module_state *st; if (module == NULL) INITERROR; st = GETSTATE(module); st->error = PyErr_NewException("_phonopy.Error", NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } #if PY_MAJOR_VERSION >= 3 return module; #endif } static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args) { PyArrayObject* py_force_constants; PyArrayObject* py_dynamical_matrices; PyArrayObject* py_commensurate_points; PyArrayObject* py_shortest_vectors; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2pp_map; PyArrayObject* py_fc_index_map; double* fc; 
/* Tail of py_transform_dynmat_to_fc (function opened on the previous chunk
   line): remaining locals, argument parsing, and the call into dynmat.c that
   back-transforms dynamical matrices at commensurate points into force
   constants. */
double* dm;                         /* dynamical matrices, flattened */
double (*comm_points)[3];           /* commensurate q-points */
double (*shortest_vectors)[27][3];  /* candidate vectors per (satom, patom) */
double* masses;
int* multiplicities;
int* s2pp_map;
int* fc_index_map;
int num_patom;
int num_satom;

if (!PyArg_ParseTuple(args, "OOOOOOOO",
                      &py_force_constants,
                      &py_dynamical_matrices,
                      &py_commensurate_points,
                      &py_shortest_vectors,
                      &py_multiplicities,
                      &py_masses,
                      &py_s2pp_map,
                      &py_fc_index_map)) {
  return NULL;
}

fc = (double*)PyArray_DATA(py_force_constants);
dm = (double*)PyArray_DATA(py_dynamical_matrices);
comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points);
shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
masses = (double*)PyArray_DATA(py_masses);
multiplicities = (int*)PyArray_DATA(py_multiplicities);
s2pp_map = (int*)PyArray_DATA(py_s2pp_map);
fc_index_map = (int*)PyArray_DATA(py_fc_index_map);
/* multiplicities has shape (num_satom, num_patom); both counts come from
   its dims. */
num_patom = PyArray_DIMS(py_multiplicities)[1];
num_satom = PyArray_DIMS(py_multiplicities)[0];

dym_transform_dynmat_to_fc(fc, dm, comm_points, shortest_vectors,
                           multiplicities, masses, s2pp_map, fc_index_map,
                           num_patom, num_satom);
Py_RETURN_NONE;
}

/* Python wrapper around compute_permutation(): fills `permutation` with the
   index mapping from rotated positions back to the original positions within
   tolerance `symprec`.  Returns an int: 1 on success, 0 when some atom could
   not be matched. */
static PyObject * py_compute_permutation(PyObject *self, PyObject *args)
{
  PyArrayObject* permutation;          /* out: int[num_pos] */
  PyArrayObject* lattice;              /* 3x3 lattice (row convention per
                                          compute_permutation's use) */
  PyArrayObject* positions;            /* fractional positions */
  PyArrayObject* permuted_positions;   /* rotated fractional positions */
  double symprec;
  int* rot_atoms;
  double (*lat)[3];
  double (*pos)[3];
  double (*rot_pos)[3];
  int num_pos;
  int is_found;

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &permutation,
                        &lattice,
                        &positions,
                        &permuted_positions,
                        &symprec)) {
    return NULL;
  }

  rot_atoms = (int*)PyArray_DATA(permutation);
  lat = (double(*)[3])PyArray_DATA(lattice);
  pos = (double(*)[3])PyArray_DATA(positions);
  rot_pos = (double(*)[3])PyArray_DATA(permuted_positions);
  num_pos = PyArray_DIMS(positions)[0];
  is_found = compute_permutation(rot_atoms,
                                 lat,
                                 pos,
                                 rot_pos,
                                 num_pos,
                                 symprec);
  return Py_BuildValue("i", is_found);
}

/* Python wrapper for gsv_copy_smallest_vectors (declarations continue on the
   next chunk line). */
static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args)
{
  PyArrayObject* py_shortest_vectors;
  PyArrayObject* py_multiplicity;
PyArrayObject* py_vectors; PyArrayObject* py_lengths; double symprec; double (*shortest_vectors)[27][3]; double (*vectors)[27][3]; double (*lengths)[27]; int * multiplicity; int size_super, size_prim; if (!PyArg_ParseTuple(args, "OOOOd", &py_shortest_vectors, &py_multiplicity, &py_vectors, &py_lengths, &symprec)) { return NULL; } shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); vectors = (double(*)[27][3])PyArray_DATA(py_vectors); lengths = (double(*)[27])PyArray_DATA(py_lengths); size_super = PyArray_DIMS(py_vectors)[0]; size_prim = PyArray_DIMS(py_vectors)[1]; gsv_copy_smallest_vectors(shortest_vectors, multiplicity, vectors, lengths, size_super * size_prim, symprec); Py_RETURN_NONE; } static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; double symprec; double (*smallest_vectors)[27][3]; int * multiplicity; double (*pos_to)[3]; double (*pos_from)[3]; int (*lattice_points)[3]; double (*reduced_basis)[3]; int (*trans_mat)[3]; int num_pos_to, num_pos_from; if (!PyArg_ParseTuple(args, "OOOOOOOd", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &symprec)) { return NULL; } smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points); reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat); 
gsv_set_smallest_vectors(smallest_vectors,
                         multiplicity,
                         pos_to,
                         num_pos_to,
                         pos_from,
                         num_pos_from,
                         lattice_points,
                         reduced_basis,
                         trans_mat,
                         symprec);
Py_RETURN_NONE;
}

/* Enforce permutation and translational symmetry on a full (n_satom x
   n_satom x 3 x 3) force-constant array, in place.  `level` iterations of:
   remove column drift, remove row drift, then symmetrize under index
   permutation; a final translational-symmetry pass follows the loop. */
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* force_constants;
  double *fc;
  int level;                      /* number of symmetrization sweeps */
  int n_satom, i, j, k, l, iter;
  double sum;

  if (!PyArg_ParseTuple(args, "Oi", &force_constants, &level)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(force_constants);
  n_satom = PyArray_DIMS(force_constants)[0];

  for (iter=0; iter < level; iter++) {
    /* Subtract drift along column */
    for (j = 0; j < n_satom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (i = 0; i < n_satom; i++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (i = 0; i < n_satom; i++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }

    /* Subtract drift along row */
    for (i = 0; i < n_satom; i++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (j = 0; j < n_satom; j++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (j = 0; j < n_satom; j++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }

    set_index_permutation_symmetry_fc(fc, n_satom);
  }
  set_translational_symmetry_fc(fc, n_satom);

  Py_RETURN_NONE;
}

/* Same symmetrization for the compact (n_patom x n_satom x 3 x 3)
   force-constant representation; uses the symmetry mapping tables to apply
   the index-permutation operation.  (Body continues on the next chunk
   line.) */
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self,
                                                      PyObject *args)
{
  PyArrayObject* py_fc;
  PyArrayObject* py_permutations;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_nsym_list;
  int level;
  double *fc;
  int *perms;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int n_patom, n_satom, i, j, k, l, n, iter;
  double sum;

  if (!PyArg_ParseTuple(args, "OOOOOi",
                        &py_fc,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list,
                        &level)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(py_fc);
  perms = (int*)PyArray_DATA(py_permutations);
  s2pp = (int*)PyArray_DATA(py_s2pp_map);
  p2s = (int*)PyArray_DATA(py_p2s_map);
  nsym_list = (int*)PyArray_DATA(py_nsym_list);
  n_patom =
PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; for (iter=0; iter < level; iter++) { for (n = 0; n < 2; n++) { /* transpose only */ set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); for (i = 0; i < n_patom; i++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sum = 0; for (j = 0; j < n_satom; j++) { sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l]; } sum /= n_satom; for (j = 0; j < n_satom; j++) { fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum; } } } } } set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 0); } set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom); Py_RETURN_NONE; } static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; double *fc; int *s2pp; int *p2s; int *nsym_list; int *perms; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOO", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); Py_RETURN_NONE; } static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_shortest_vectors; PyArrayObject* py_q; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; double* dm; double* fc; double* q; double (*svecs)[27][3]; double* m; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; if (!PyArg_ParseTuple(args, 
"OOOOOOOO", &py_dynamical_matrix, &py_force_constants, &py_q, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map)) { return NULL; } dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); m = (double*)PyArray_DATA(py_masses); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, NULL, 1); Py_RETURN_NONE; } static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_shortest_vectors; PyArrayObject* py_q_cart; PyArrayObject* py_q; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; double factor; double* dm; double* fc; double* q_cart; double* q; double (*svecs)[27][3]; double* m; double (*born)[3][3]; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; int n; double (*charge_sum)[3][3]; if (!PyArg_ParseTuple(args, "OOOOOOOOOOd", &py_dynamical_matrix, &py_force_constants, &py_q, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &py_q_cart, &py_born, &factor)) return NULL; dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q_cart = (double*)PyArray_DATA(py_q_cart); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); m = (double*)PyArray_DATA(py_masses); born = (double(*)[3][3])PyArray_DATA(py_born); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); 
num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double(*)[3][3]) malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args) { PyArrayObject* py_dd; PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_q_cart; PyArrayObject* py_q_direction; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double factor; double lambda; double tolerance; double* dd; double* dd_q0; double (*G_list)[3]; double* q_vector; double* q_direction; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double*)PyArray_DATA(py_dd); dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); if ((PyObject*)py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double*)PyArray_DATA(py_q_direction); } q_vector = (double*)PyArray_DATA(py_q_cart); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole_q0(PyObject *self, 
PyObject *args) { PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double lambda; double tolerance; double* dd_q0; double (*G_list)[3]; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args) { PyArrayObject* derivative_dynmat; PyArrayObject* py_force_constants; PyArrayObject* r_vector; PyArrayObject* lattice; PyArrayObject* q_vector; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; PyArrayObject* dielectric; PyArrayObject* q_direction; double nac_factor; double* ddm; double* fc; double* q; double* lat; double* r; double* m; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; double *z; double *epsilon; double *q_dir; if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO", &derivative_dynmat, &py_force_constants, &q_vector, &lattice, /* column vectors */ &r_vector, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &dielectric, &q_direction)) { return NULL; } ddm = (double*)PyArray_DATA(derivative_dynmat); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(q_vector); lat = 
(double*)PyArray_DATA(lattice); r = (double*)PyArray_DATA(r_vector); m = (double*)PyArray_DATA(py_masses); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; if ((PyObject*)py_born == Py_None) { z = NULL; } else { z = (double*)PyArray_DATA(py_born); } if ((PyObject*)dielectric == Py_None) { epsilon = NULL; } else { epsilon = (double*)PyArray_DATA(dielectric); } if ((PyObject*)q_direction == Py_None) { q_dir = NULL; } else { q_dir = (double*)PyArray_DATA(q_direction); } get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q, lat, r, multi, m, s2p_map, p2s_map, nac_factor, z, epsilon, q_dir); Py_RETURN_NONE; } /* Thermal properties */ static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args) { PyArrayObject* py_thermal_props; PyArrayObject* py_temperatures; PyArrayObject* py_frequencies; PyArrayObject* py_weights; double cutoff_frequency; double *temperatures; double* freqs; double *thermal_props; int* w; int num_qpoints; int num_bands; int num_temp; int i, j, k; double f; double *tp; if (!PyArg_ParseTuple(args, "OOOOd", &py_thermal_props, &py_temperatures, &py_frequencies, &py_weights, &cutoff_frequency)) { return NULL; } thermal_props = (double*)PyArray_DATA(py_thermal_props); temperatures = (double*)PyArray_DATA(py_temperatures); num_temp = PyArray_DIMS(py_temperatures)[0]; freqs = (double*)PyArray_DATA(py_frequencies); num_qpoints = PyArray_DIMS(py_frequencies)[0]; w = (int*)PyArray_DATA(py_weights); num_bands = PyArray_DIMS(py_frequencies)[1]; tp = (double*)malloc(sizeof(double) * num_qpoints * num_temp * 3); for (i = 0; i < num_qpoints * num_temp * 3; i++) { tp[i] = 0; } #pragma omp parallel for private(j, k, f) for (i = 0; i < num_qpoints; i++){ for (j = 0; j < num_temp; j++) { for (k = 0; k < num_bands; k++){ f = freqs[i * num_bands + k]; if (temperatures[j] > 0 && f > 
cutoff_frequency) { tp[i * num_temp * 3 + j * 3] += get_free_energy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 1] += get_entropy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 2] += get_heat_capacity(temperatures[j], f) * w[i]; } } } } for (i = 0; i < num_qpoints; i++) { for (j = 0; j < num_temp * 3; j++) { thermal_props[j] += tp[i * num_temp * 3 + j]; } } free(tp); tp = NULL; Py_RETURN_NONE; } static PyObject * py_distribute_fc2(PyObject *self, PyObject *args) { PyArrayObject* py_force_constants; PyArrayObject* py_permutations; PyArrayObject* py_map_atoms; PyArrayObject* py_map_syms; PyArrayObject* py_atom_list; PyArrayObject* py_rotations_cart; double (*r_carts)[3][3]; double (*fc2)[3][3]; int *permutations; int *map_atoms; int *map_syms; int *atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int*)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int*)PyArray_DATA(py_permutations); map_atoms = (int*)PyArray_DATA(py_map_atoms); map_syms = (int*)PyArray_DATA(py_map_syms); r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = PyArray_DIMS(py_permutations)[0]; num_pos = PyArray_DIMS(py_permutations)[1]; if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms"); return NULL; } if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms"); return NULL; } if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) { PyErr_SetString(PyExc_ValueError, "permutations and rotations are different length"); return NULL; } distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations, map_atoms, map_syms, 
num_rot, num_pos); Py_RETURN_NONE; } static PyObject *py_thm_neighboring_grid_points(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_points; PyArrayObject* py_relative_grid_address; PyArrayObject* py_mesh; PyArrayObject* py_bz_grid_address; PyArrayObject* py_bz_map; long grid_point; int (*relative_grid_address)[3]; int num_relative_grid_address; int *mesh; int (*bz_grid_address)[3]; size_t *bz_map_size_t; size_t *relative_grid_points_size_t; if (!PyArg_ParseTuple(args, "OlOOOO", &py_relative_grid_points, &grid_point, &py_relative_grid_address, &py_mesh, &py_bz_grid_address, &py_bz_map)) { return NULL; } relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address); num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0]; mesh = (int*)PyArray_DATA(py_mesh); bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address); bz_map_size_t = (size_t*)PyArray_DATA(py_bz_map); relative_grid_points_size_t = (size_t*)PyArray_DATA(py_relative_grid_points); thm_get_dense_neighboring_grid_points(relative_grid_points_size_t, grid_point, relative_grid_address, num_relative_grid_address, mesh, bz_grid_address, bz_map_size_t); Py_RETURN_NONE; } static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_address; PyArrayObject* py_reciprocal_lattice_py; int (*relative_grid_address)[4][3]; double (*reciprocal_lattice)[3]; if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address, &py_reciprocal_lattice_py)) { return NULL; } relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address); reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py); thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice); Py_RETURN_NONE; } static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_address; int (*relative_grid_address)[24][4][3]; if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) { 
return NULL; } relative_grid_address = (int(*)[24][4][3])PyArray_DATA(py_relative_grid_address); thm_get_all_relative_grid_address(relative_grid_address); Py_RETURN_NONE; } static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args) { double omega; PyArrayObject* py_tetrahedra_omegas; char* function; double (*tetrahedra_omegas)[4]; double iw; if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas, &function)) { return NULL; } tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = thm_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args) { PyArrayObject* py_integration_weights; PyArrayObject* py_omegas; PyArrayObject* py_tetrahedra_omegas; char* function; double *omegas; double *iw; int num_omegas; double (*tetrahedra_omegas)[4]; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double*)PyArray_DATA(py_omegas); iw = (double*)PyArray_DATA(py_integration_weights); num_omegas = (int)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); thm_get_integration_weight_at_omegas(iw, num_omegas, omegas, tetrahedra_omegas, function[0]); Py_RETURN_NONE; } static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args) { PyArrayObject* py_freq_tetras; PyArrayObject* py_grid_points; PyArrayObject* py_mesh; PyArrayObject* py_grid_address; PyArrayObject* py_gp_ir_index; PyArrayObject* py_relative_grid_address; PyArrayObject* py_frequencies; double* freq_tetras; size_t* grid_points; int num_gp_in; int* mesh; int (*grid_address)[3]; size_t* gp_ir_index; int (*relative_grid_address)[3]; double* frequencies; int num_band; int is_shift[3] = {0, 0, 0}; size_t i, j, k, gp; int g_addr[3]; int address_double[3]; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, 
&py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double*)PyArray_DATA(py_freq_tetras); grid_points = (size_t*)PyArray_DATA(py_grid_points); num_gp_in = (int)PyArray_DIMS(py_grid_points)[0]; mesh = (int*)PyArray_DATA(py_mesh); grid_address = (int(*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (size_t*)PyArray_DATA(py_gp_ir_index); relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address); frequencies = (double*)PyArray_DATA(py_frequencies); num_band = (int)PyArray_DIMS(py_frequencies)[1]; for (i = 0; i < num_gp_in; i++) { #pragma omp parallel for private(k, g_addr, gp, address_double) for (j = 0; j < num_band * 96; j++) { for (k = 0; k < 3; k++) { g_addr[k] = grid_address[grid_points[i]][k] + relative_grid_address[j % 96][k]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); gp = kgd_get_dense_grid_point_double_mesh(address_double, mesh); freq_tetras[i * num_band * 96 + j] = frequencies[gp_ir_index[gp] * num_band + j / 96]; } } Py_RETURN_NONE; } static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args) { PyArrayObject* py_dos; PyArrayObject* py_mesh; PyArrayObject* py_freq_points; PyArrayObject* py_frequencies; PyArrayObject* py_coef; PyArrayObject* py_grid_address; PyArrayObject* py_grid_mapping_table; PyArrayObject* py_relative_grid_address; double *dos; int* mesh; double* freq_points; int num_freq_points; double* frequencies; double* coef; int (*grid_address)[3]; size_t num_gp, num_ir_gp; int num_coef; int num_band; size_t *grid_mapping_table; int (*relative_grid_address)[4][3]; int is_shift[3] = {0, 0, 0}; size_t i, j, k, l, m, q, r, count; size_t ir_gps[24][4]; int g_addr[3]; double tetrahedra[24][4]; int address_double[3]; size_t *gp2ir, *ir_grid_points; int *weights; double iw; gp2ir = NULL; ir_grid_points = NULL; weights = NULL; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, 
&py_frequencies, &py_coef, &py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double*)PyArray_DATA(py_dos); mesh = (int*)PyArray_DATA(py_mesh); freq_points = (double*)PyArray_DATA(py_freq_points); num_freq_points = (int)PyArray_DIMS(py_freq_points)[0]; frequencies = (double*)PyArray_DATA(py_frequencies); num_ir_gp = (size_t)PyArray_DIMS(py_frequencies)[0]; num_band = (int)PyArray_DIMS(py_frequencies)[1]; coef = (double*)PyArray_DATA(py_coef); num_coef = (int)PyArray_DIMS(py_coef)[1]; grid_address = (int(*)[3])PyArray_DATA(py_grid_address); num_gp = (size_t)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (size_t*)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address); gp2ir = (size_t*)malloc(sizeof(size_t) * num_gp); ir_grid_points = (size_t*)malloc(sizeof(size_t) * num_ir_gp); weights = (int*)malloc(sizeof(int) * num_ir_gp); count = 0; for (i = 0; i < num_gp; i++) { if (grid_mapping_table[i] == i) { gp2ir[i] = count; ir_grid_points[count] = i; weights[count] = 1; count++; } else { gp2ir[i] = gp2ir[grid_mapping_table[i]]; weights[gp2ir[i]]++; } } if (num_ir_gp != count) { printf("Something is wrong!\n"); } #pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double) for (i = 0; i < num_ir_gp; i++) { /* set 24 tetrahedra */ for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { for (r = 0; r < 3; r++) { g_addr[r] = grid_address[ir_grid_points[i]][r] + relative_grid_address[l][q][r]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)]; } } for (k = 0; k < num_band; k++) { for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k]; } } for (j = 0; j < num_freq_points; j++) { iw = 
          thm_get_integration_weight(freq_points[j], tetrahedra, 'I') *
          weights[i];
        for (m = 0; m < num_coef; m++) {
          dos[i * num_band * num_freq_points * num_coef +
              k * num_coef * num_freq_points +
              j * num_coef + m] +=
            iw * coef[i * num_coef * num_band + m * num_band + k];
        }
      }
    }
  }

  free(gp2ir);
  gp2ir = NULL;
  free(ir_grid_points);
  ir_grid_points = NULL;
  free(weights);
  weights = NULL;

  Py_RETURN_NONE;
}

/* Harmonic-oscillator free energy at temperature T (K) for a mode of
   energy f (eV); KB is assumed to be Boltzmann's constant in eV/K. */
static double get_free_energy(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  return KB * temperature * log(1 - exp(- f / (KB * temperature)));
}

/* Mode entropy; diverges as f/(KB*T) -> 0 (sinh in the denominator) —
   callers are expected to avoid f == 0.  TODO confirm. */
static double get_entropy(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  double val;

  val = f / (2 * KB * temperature);
  return 1 / (2 * temperature) * f * cosh(val) / sinh(val) - KB * log(2 * sinh(val));
}

/* Einstein heat capacity of a single mode of energy f (eV). */
static double get_heat_capacity(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  /* If val is close to 1. Then expansion is used. */
  double val, val1, val2;

  val = f / (KB * temperature);
  val1 = exp(val);
  val2 = (val) / (val1 - 1);
  return KB * val1 * val2 * val2;
}

/* static double get_energy(double temperature, double f){ */
/*   /\* temperature is defined by T (K) *\/ */
/*   /\* 'f' must be given in eV. *\/ */
/*   return f / (exp(f / (KB * temperature)) - 1); */
/* } */

/* Find the permutation relating `pos` and `rot_pos` under lattice `lat`
   within tolerance `symprec`.  On success rot_atom[j] = i such that
   pos[i] matches rot_pos[j] modulo lattice translations; returns 1.
   Returns 0 (and prints) if any atom stays unmatched. */
static int compute_permutation(int * rot_atom,
                               PHPYCONST double lat[3][3],
                               PHPYCONST double (*pos)[3],
                               PHPYCONST double (*rot_pos)[3],
                               const int num_pos,
                               const double symprec)
{
  int i,j,k,l;
  int search_start;
  double distance2, diff_cart;
  double diff[3];

  for (i = 0; i < num_pos; i++) {
    rot_atom[i] = -1;
  }

  /* optimization: Iterate primarily by pos instead of rot_pos. */
  /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */
  /* Then track the first unassigned index. */
  /* */
  /* This works best if the permutation is close to the identity.
     */
  /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */
  search_start = 0;
  for (i = 0; i < num_pos; i++) {
    while (rot_atom[search_start] >= 0) {
      search_start++;
    }
    for (j = search_start; j < num_pos; j++) {
      if (rot_atom[j] >= 0) {
        continue;
      }
      /* Fractional difference wrapped to the nearest lattice image. */
      for (k = 0; k < 3; k++) {
        diff[k] = pos[i][k] - rot_pos[j][k];
        diff[k] -= nint(diff[k]);
      }
      /* Cartesian distance through the lattice matrix. */
      distance2 = 0;
      for (k = 0; k < 3; k++) {
        diff_cart = 0;
        for (l = 0; l < 3; l++) {
          diff_cart += lat[k][l] * diff[l];
        }
        distance2 += diff_cart * diff_cart;
      }

      if (sqrt(distance2) < symprec) {
        rot_atom[j] = i;
        break;
      }
    }
  }

  for (i = 0; i < num_pos; i++) {
    if (rot_atom[i] < 0) {
      printf("Encounter some problem in compute_permutation.\n");
      return 0;
    }
  }
  return 1;
}

/* Implementation detail of get_smallest_vectors. */
/* Finds the smallest vectors within each list and copies them to the output. */
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                      int * multiplicity,
                                      PHPYCONST double (*vector_lists)[27][3],
                                      PHPYCONST double (*length_lists)[27],
                                      const int num_lists,
                                      const double symprec)
{
  int i,j,k;
  int count;
  double minimum;
  double (*vectors)[3];
  double * lengths;

  for (i = 0; i < num_lists; i++) {
    /* Look at a single list of 27 vectors. */
    /* NOTE(review): these assignments drop the PHPYCONST qualifier. */
    lengths = length_lists[i];
    vectors = vector_lists[i];

    /* Compute the minimum length. */
    minimum = DBL_MAX;
    for (j = 0; j < 27; j++) {
      if (lengths[j] < minimum) {
        minimum = lengths[j];
      }
    }

    /* Copy vectors whose length is within tolerance.
       NOTE(review): uses `<= symprec` here but gsv_set_smallest_vectors
       uses strict `<` — confirm which is intended. */
    count = 0;
    for (j = 0; j < 27; j++) {
      if (lengths[j] - minimum <= symprec) {
        for (k = 0; k < 3; k++) {
          shortest_vectors[i][count][k] = vectors[j][k];
        }
        count++;
      }
    }
    multiplicity[i] = count;
  }
}

/* For every (to, from) atom pair, evaluate the 27 lattice-image vectors,
   keep those whose reduced-basis length ties the minimum within symprec,
   and store them in supercell coordinates via trans_mat. */
static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3],
                                     int *multiplicity,
                                     PHPYCONST double (*pos_to)[3],
                                     const int num_pos_to,
                                     PHPYCONST double (*pos_from)[3],
                                     const int num_pos_from,
                                     PHPYCONST int lattice_points[27][3],
                                     PHPYCONST double reduced_basis[3][3],
                                     PHPYCONST int trans_mat[3][3],
                                     const double symprec)
{
  int i, j, k, l, count;
  double length_tmp, minimum, vec_xyz;
  double length[27], vec[27][3];

  for (i = 0; i < num_pos_to; i++) {
    for (j = 0; j < num_pos_from; j++) {
      /* Lengths of the 27 candidate image vectors in the reduced basis. */
      for (k = 0; k < 27; k++) {
        length[k] = 0;
        for (l = 0; l < 3; l++) {
          vec[k][l] = pos_to[i][l] - pos_from[j][l] + lattice_points[k][l];
        }
        for (l = 0; l < 3; l++) {
          length_tmp = (reduced_basis[l][0] * vec[k][0] +
                        reduced_basis[l][1] * vec[k][1] +
                        reduced_basis[l][2] * vec[k][2]);
          length[k] += length_tmp * length_tmp;
        }
        length[k] = sqrt(length[k]);
      }

      minimum = DBL_MAX;
      for (k = 0; k < 27; k++) {
        if (length[k] < minimum) {
          minimum = length[k];
        }
      }

      count = 0;
      for (k = 0; k < 27; k++) {
        if (length[k] - minimum < symprec) {
          for (l = 0; l < 3; l++) {
            /* Transform to supercell coordinates */
            vec_xyz = (trans_mat[l][0] * vec[k][0] +
                       trans_mat[l][1] * vec[k][1] +
                       trans_mat[l][2] * vec[k][2]);
            smallest_vectors[i * num_pos_from + j][count][l] = vec_xyz;
          }
          count++;
        }
      }
      multiplicity[i * num_pos_from + j] = count;
    }
  }
}

/* Distribute fc2 entries from symmetry-mapped "done" atoms onto the
   remaining atoms of atom_list using rotation matrices and permutations. */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
                           const int * atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int * permutations, /* shape[n_rot][n_pos] */
                           const int * map_atoms, /* shape [n_pos] */
                           const int * map_syms, /* shape [n_pos] */
                           const int num_rot,
                           const int num_pos)
{
  int i, j, k, l, m;
  int atom_todo, atom_done, atom_other;
  int sym_index;
  int *atom_list_reverse;
  double (*fc2_done)[3];
  double
    (*fc2_todo)[3];
  double (*r_cart)[3];
  const int * permutation;

  atom_list_reverse = NULL;
  atom_list_reverse = (int*)malloc(sizeof(int) * num_pos);
  /* atom_list_reverse[!atom_done] is undefined. */
  for (i = 0; i < len_atom_list; i++) {
    atom_done = map_atoms[atom_list[i]];
    if (atom_done == atom_list[i]) {
      atom_list_reverse[atom_done] = i;
    }
  }

  for (i = 0; i < len_atom_list; i++) {
    /* look up how this atom maps into the done list. */
    atom_todo = atom_list[i];
    atom_done = map_atoms[atom_todo];
    sym_index = map_syms[atom_todo];

    /* skip the atoms in the done list, */
    /* which are easily identified because they map to themselves. */
    if (atom_todo == atom_done) {
      continue;
    }

    /* look up information about the rotation */
    r_cart = r_carts[sym_index];
    permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

    /* distribute terms from atom_done to atom_todo */
    for (atom_other = 0; atom_other < num_pos; atom_other++) {
      fc2_done = fc2[atom_list_reverse[atom_done] * num_pos + permutation[atom_other]];
      fc2_todo = fc2[i * num_pos + atom_other];
      for (j = 0; j < 3; j++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            for (m = 0; m < 3; m++) {
              /* P' = R^-1 P R */
              fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
            }
          }
        }
      }
    }
  }

  free(atom_list_reverse);
  atom_list_reverse = NULL;
}

/* Symmetrize fc with respect to index permutation:
   fc[i][j] and fc[j][i]^T are averaged in place (flat 3x3 blocks). */
static void set_index_permutation_symmetry_fc(double * fc,
                                              const int natom)
{
  int i, j, k, l, m, n;

  for (i = 0; i < natom; i++) {
    /* non diagonal part */
    for (j = i + 1; j < natom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          m = i * natom * 9 + j * 9 + k * 3 + l;
          n = j * natom * 9 + i * 9 + l * 3 + k;
          fc[m] += fc[n];
          fc[m] /= 2;
          fc[n] = fc[m];
        }
      }
    }

    /* diagonal part */
    for (k = 0; k < 2; k++) {
      for (l = k + 1; l < 3; l++) {
        m = i * natom * 9 + i * 9 + k * 3 + l;
        n = i * natom * 9 + i * 9 + l * 3 + k;
        fc[m] += fc[n];
        fc[m] /= 2;
        fc[n] = fc[m];
      }
    }
  }
}

/* Enforce the acoustic sum rule: each diagonal 3x3 block is set so that
   rows of fc sum to zero (symmetrized average of the off-diagonal sums). */
static void set_translational_symmetry_fc(double * fc,
                                          const int natom)
{
  int i, j, k, l, m;
  double sums[3][3];

  for (i = 0;
       i < natom; i++) {
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        sums[k][l] = 0;
        m = i * natom * 9 + k * 3 + l;
        for (j = 0; j < natom; j++) {
          if (i != j) {
            sums[k][l] += fc[m];
          }
          m += 9;
        }
      }
    }
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i * natom * 9 + i * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2;
      }
    }
  }
}

/* Index-permutation symmetrization (or transposition when is_transpose)
   for compact fc storage: fc[n_patom][n_satom][3][3].  `done` marks pairs
   already handled so each symmetric pair is visited once. */
static void set_index_permutation_symmetry_compact_fc(double * fc,
                                                      const int p2s[],
                                                      const int s2pp[],
                                                      const int nsym_list[],
                                                      const int perms[],
                                                      const int n_satom,
                                                      const int n_patom,
                                                      const int is_transpose)
{
  int i, j, k, l, m, n, i_p, j_p, i_trans;
  double fc_elem;
  char *done;

  done = NULL;
  /* NOTE(review): malloc result is not checked before use. */
  done = (char*)malloc(sizeof(char) * n_satom * n_patom);
  for (i = 0; i < n_satom * n_patom; i++) {
    done[i] = 0;
  }

  for (j = 0; j < n_satom; j++) {
    j_p = s2pp[j];
    for (i_p = 0; i_p < n_patom; i_p++) {
      i = p2s[i_p];
      if (i == j) { /* diagonal part */
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            if (l > k) {
              m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
              n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
              if (is_transpose) {
                fc_elem = fc[m];
                fc[m] = fc[n];
                fc[n] = fc_elem;
              } else {
                fc[m] = (fc[m] + fc[n]) / 2;
                fc[n] = fc[m];
              }
            }
          }
        }
      }

      if (!done[i_p * n_satom + j]) {
        /* (j, i) -- nsym_list[j] --> (j', i') */
        /* nsym_list[j] translates j to j' where j' is in */
        /* primitive cell. The same translation sends i to i' */
        /* where i' is not necessarily to be in primitive cell.
           */
        /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
        i_trans = perms[nsym_list[j] * n_satom + i];
        done[i_p * n_satom + j] = 1;
        done[j_p * n_satom + i_trans] = 1;
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
            n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
            if (is_transpose) {
              fc_elem = fc[m];
              fc[m] = fc[n];
              fc[n] = fc_elem;
            } else {
              fc[m] = (fc[n] + fc[m]) / 2;
              fc[n] = fc[m];
            }
          }
        }
      }
    }
  }

  free(done);
  done = NULL;
}

/* Acoustic sum rule for compact fc storage; analogous to
   set_translational_symmetry_fc but indexed through p2s. */
static void set_translational_symmetry_compact_fc(double * fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom)
{
  int j, k, l, m, i_p;
  double sums[3][3];

  for (i_p = 0; i_p < n_patom; i_p++) {
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        sums[k][l] = 0;
        m = i_p * n_satom * 9 + k * 3 + l;
        for (j = 0; j < n_satom; j++) {
          if (p2s[i_p] != j) {
            sums[k][l] += fc[m];
          }
          m += 9;
        }
      }
    }
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] =
          -(sums[k][l] + sums[l][k]) / 2;
      }
    }
  }
}

/* Round to nearest integer, halves away from zero. */
static int nint(const double a)
{
  if (a < 0.0)
    return (int) (a - 0.5);
  else
    return (int) (a + 0.5);
}
shared_update.c
// RUN: %libomptarget-compile-run-and-check-generic
// XFAIL: nvptx64-nvidia-cuda

// Test for OpenMP "requires unified_shared_memory": host and device must
// observe the same addresses for both heap- and stack-backed arrays, and
// updates made on either side must be visible to the other.
// NOTE(review): malloc/free and int64_t are used without including
// <stdlib.h>/<stdint.h>; confirm the test harness tolerates this.

#include <stdio.h>
#include <omp.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

extern void __tgt_register_requires(int64_t);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

#pragma omp requires unified_shared_memory

#define N 1024

int main(int argc, char *argv[]) {
  int fails;
  void *host_alloc, *device_alloc;
  void *host_data, *device_data;
  int *alloc = (int *)malloc(N * sizeof(int));
  int data[N];

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);

  for (int i = 0; i < N; ++i) {
    alloc[i] = 10;
    data[i] = 1;
  }

  host_data = &data[0];
  host_alloc = &alloc[0];

  // implicit mapping of data
#pragma omp target map(tofrom : device_data, device_alloc)
  {
    device_data = &data[0];
    device_alloc = &alloc[0];

    for (int i = 0; i < N; i++) {
      alloc[i] += 1;
      data[i] += 1;
    }
  }

  // With unified shared memory the device sees the host pointers directly.
  // CHECK: Address of alloc on device matches host address.
  if (device_alloc == host_alloc)
    printf("Address of alloc on device matches host address.\n");

  // CHECK: Address of data on device matches host address.
  if (device_data == host_data)
    printf("Address of data on device matches host address.\n");

  // On the host, check that the arrays have been updated.
  // CHECK: Alloc device values updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (alloc[i] != 11)
      fails++;
  }
  printf("Alloc device values updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  // CHECK: Data device values updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (data[i] != 2)
      fails++;
  }
  printf("Data device values updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  //
  // Test that updates on the host and on the device are both visible.
  //

  // Update on the host.
  for (int i = 0; i < N; ++i) {
    alloc[i] += 1;
    data[i] += 1;
  }

#pragma omp target
  {
    // CHECK: Alloc host values updated: Succeeded
    fails = 0;
    for (int i = 0; i < N; i++) {
      if (alloc[i] != 12)
        fails++;
    }
    printf("Alloc host values updated: %s\n",
           (fails == 0) ? "Succeeded" : "Failed");

    // CHECK: Data host values updated: Succeeded
    fails = 0;
    for (int i = 0; i < N; i++) {
      if (data[i] != 3)
        fails++;
    }
    printf("Data host values updated: %s\n",
           (fails == 0) ? "Succeeded" : "Failed");
  }

  free(alloc);

  printf("Done!\n");

  return 0;
}
nodal_residualbased_elimination_builder_and_solver_for_FSI.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi, Alessandro Franci // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI) #define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #define USE_GOOGLE_HASH #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "pfem_fluid_dynamics_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedEliminationBuilderAndSolverForFSI * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedEliminationBuilderAndSolverForFSI : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverForFSI); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. */ NodalResidualBasedEliminationBuilderAndSolverForFSI( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverForFSI") << "Using the standard builder and solver " << std::endl; } /** Destructor. 
*/ ~NodalResidualBasedEliminationBuilderAndSolverForFSI() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetMaterialPropertiesToFluid( ModelPart::NodeIterator itNode, double &density, double &deviatoricCoeff, double &volumetricCoeff, double timeInterval, double nodalVolume) { density = itNode->FastGetSolutionStepValue(DENSITY); deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); if (volumetricCoeff > 0) { volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff); volumetricCoeff *= bulkReduction; } } void SetMaterialPropertiesToSolid( ModelPart::NodeIterator itNode, double &density, double &deviatoricCoeff, double &volumetricCoeff, double timeInterval, double nodalVolume) { density = itNode->FastGetSolutionStepValue(SOLID_DENSITY); double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO); //deviatoricCoeff=deltaT*secondLame deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5; //volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3) volumetricCoeff = timeInterval * poissonRatio 
* youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0; } void BuildSolidNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b, double hybridCoeff) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //contributions to the system LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType solidEquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; //double theta = 0.5; double theta = 1.0; array_1d<double, 3> Acc(3, 0.0); double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; double density = 0; double deviatoricCoeff = 0; double volumetricCoeff = 0; double dynamics = 1.0; //dynamics=0.0; // static problem without intertial effects /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); double numNodesForExternalForce = 0; double nodalExternalForce = 0; bool belytsckoCase = false; bool cooksMembraneCase = false; if (cooksMembraneCase == true) { for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double posX = itNode->X0(); if (posX > 47.999 && posX < 48.001) { numNodesForExternalForce += 1.0; } } if (numNodesForExternalForce > 0) { nodalExternalForce = 1.0 / numNodesForExternalForce; } } if (belytsckoCase == true) { for (ModelPart::NodeIterator 
itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double posX = itNode->X0(); if (posX > 24.999 && posX < 25.001) { numNodesForExternalForce += 1.0; } } if (numNodesForExternalForce > 0) { nodalExternalForce = 40.0 / numNodesForExternalForce; } } for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if (itNode->Is(SOLID)) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = solidNodalSFDneighboursId.size(); const double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); if (neighSize > 1 && nodalVolume > 0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); if (solidLHS_Contribution.size1() != localSize) solidLHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (solidRHS_Contribution.size() != localSize) solidRHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! 
if (solidEquationId.size() != localSize) solidEquationId.resize(localSize, false); noalias(solidLHS_Contribution) = ZeroMatrix(localSize, localSize); noalias(solidRHS_Contribution) = ZeroVector(localSize); this->SetMaterialPropertiesToSolid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume); firstRow = 0; firstCol = 0; if (dimension == 2) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 * dynamics / timeInterval; solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 * dynamics / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); solidRHS_Contribution[0] += -nodalVolume * density * Acc[0] * dynamics; solidRHS_Contribution[1] += -nodalVolume * density * Acc[1] * dynamics; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; solidRHS_Contribution[0] += 
nodalVolume * density * VolumeAcceleration[0]; solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; ///////////////LOAD CONDITIONS FOR BELYTSCHKO CASE // if(itNode->X0()>24.999){ // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge) //solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge) //solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge) //solidRHS_Contribution[1]+=40.0/9.0; // mesh 0.5 (8 element per edge) // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge) // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge) //solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge) //} if (belytsckoCase == true) { if (itNode->X0() > 24.999 && itNode->X0() < 25.001) { solidRHS_Contribution[1] += nodalExternalForce; } } if (cooksMembraneCase == true) { if (itNode->X0() > 47.999 && itNode->X0() < 48.001) { solidRHS_Contribution[1] += nodalExternalForce; } } //-------- INTERNAL FORCES TERM -------// array_1d<double, 3> Sigma(3, 0.0); Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1]; solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]) * hybridCoeff; solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]) * hybridCoeff; for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1]; 
solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta * hybridCoeff; firstRow += 2; } firstRow = 0; firstCol += 2; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); break; } } } else if (i < neighb_nodes.size()) { solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ } else if (dimension == 3) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; 
solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; solidLHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); solidRHS_Contribution[0] += -nodalVolume * density * Acc[0]; solidRHS_Contribution[1] += -nodalVolume * density * Acc[1]; solidRHS_Contribution[2] += -nodalVolume * density * Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; solidRHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2]; ///////////////LOAD CONDITIONS FOR BELITSCHKO CASE // if(itNode->X0()>24.999){ // // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge) // // solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge) // // solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge) // solidRHS_Contribution[1]+=40.0/27.0; // mesh 0.5 (8 element per edge, 2 per width) // // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge) // // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge) // // solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge) // } //-------- INTERNAL FORCES TERM -------// array_1d<double, 6> Sigma(6, 0.0); Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0] = 
itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1]; dNdZi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 2]; solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]); solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]); solidRHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1]; dNdZj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 2]; solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * 
deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta; firstRow += 3; } firstRow = 0; firstCol += 3; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); break; } } } else if (i < neighb_nodes.size()) { solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, 
solidEquationId, mlock_array); #else Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId); #endif } } } // } KRATOS_CATCH("") } void BuildFluidNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; /* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */ //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; double theta = 0.5; array_1d<double, 3> Acc(3, 0.0); // array_1d<double,6> Sigma(6,0.0); double pressure = 0; double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; double density = 0; double deviatoricCoeff = 0; double volumetricCoeff = 0; /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = 
nodalSFDneighboursId.size(); const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); if (neighSize > 1 && nodalVolume > 0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); if (LHS_Contribution.size1() != localSize) LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (RHS_Contribution.size() != localSize) RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! if (EquationId.size() != localSize) EquationId.resize(localSize, false); noalias(LHS_Contribution) = ZeroMatrix(localSize, localSize); noalias(RHS_Contribution) = ZeroVector(localSize); this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // // std::cout<<"density,deviatoricCoeff,volumetricCoeff "<<density<<" "<<deviatoricCoeff<<" "<<volumetricCoeff<<std::endl; // std::cout<<"INTERFACE nodalVolume "<<nodalVolume<<std::endl; // }else{ // std::cout<<"nodalVolume "<<nodalVolume<<std::endl; // } firstRow = 0; firstCol = 0; if (dimension == 2) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); RHS_Contribution[0] += -nodalVolume * density * Acc[0]; RHS_Contribution[1] += -nodalVolume * density * Acc[1]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // 
double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; //-------- INTERNAL FORCES TERM -------// array_1d<double, 3> Sigma(3, 0.0); Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta); Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * 
Sigma[0] + dNdYi * Sigma[2]); RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta; firstRow += 2; } firstRow = 0; firstCol += 2; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); break; } } } else if (i < neighb_nodes.size()) { EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); } } /* std::cout << "LHS_Contribution = " << 
LHS_Contribution << std::endl; */ } else if (dimension == 3) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); RHS_Contribution[0] += -nodalVolume * density * Acc[0]; RHS_Contribution[1] += -nodalVolume * density * Acc[1]; RHS_Contribution[2] += -nodalVolume * density * Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2]; //-------- INTERNAL FORCES TERM -------// array_1d<double, 6> Sigma(6, 0.0); Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta); Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = 
itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]); RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]); RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2]; LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * 
((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta; firstRow += 3; } firstRow = 0; firstCol += 3; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); break; } } } else if (i < neighb_nodes.size()) { EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } // } KRATOS_CATCH("") } /** * @brief This is a 
call to the linear system solver * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void SystemSolve( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b, ModelPart &rModelPart) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded()) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. 
* @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY Timer::Start("Build"); // boost::timer m_build_time; double hybridCoeff = 1.0; // 0.5: half nodal - half elemental; 1.0 all nodal; 0.0 all elemental BuildSolidNodally(pScheme, rModelPart, A, b, hybridCoeff); if (hybridCoeff < 0.99999999) { BuildElementally(pScheme, rModelPart, A, b); } BuildFluidNodally(pScheme, rModelPart, A, b); // std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl; Timer::Stop("Build"); // ApplyPointLoads(pScheme,rModelPart,b); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; // const double start_solve = OpenMPUtils::GetCurrentTime(); // Timer::Start("Solve"); /* boost::timer m_solve_time; */ SystemSolveWithPhysics(A, Dx, b, rModelPart); /* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */ // Timer::Stop("Solve"); // const double stop_solve = OpenMPUtils::GetCurrentTime(); // KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system" << 
"\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } void BuildElementally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &rA, TSystemVectorType &rb) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //getting the elements from the model const int nelements = static_cast<int>(rModelPart.Elements().size()); //getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // const double start_build = OpenMPUtils::GetCurrentTime(); // assemble all elements #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; k++) { ModelPart::ElementsContainerType::iterator it = el_begin + k; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId, mLockArray); #else AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId); #endif } } #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; k++) { ModelPart::ConditionsContainerType::iterator it = cond_begin + k; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); #ifdef USE_LOCKS_IN_ASSEMBLY AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId, mLockArray); #else AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } // const double stop_build = OpenMPUtils::GetCurrentTime(); // KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl; KRATOS_CATCH("") } void AssembleElementally( TSystemMatrixType &rA, TSystemVectorType &rb, const LocalSystemMatrixType &rLHSContribution, const LocalSystemVectorType &rRHSContribution, const Element::EquationIdVectorType 
&rEquationId #ifdef USE_LOCKS_IN_ASSEMBLY , std::vector<omp_lock_t> &rLockArray #endif ) { unsigned int local_size = rLHSContribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef USE_LOCKS_IN_ASSEMBLY omp_set_lock(&rLockArray[i_global]); b[i_global] += rRHSContribution(i_local); #else double &r_a = rb[i_global]; const double &v_a = rRHSContribution(i_local); #pragma omp atomic r_a += v_a; #endif AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId); #ifdef USE_LOCKS_IN_ASSEMBLY omp_unset_lock(&rLockArray[i_global]); #endif } //note that computation of reactions is not performed here! } } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. * @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType &pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = ParallelUtilities::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; #ifdef USE_GOOGLE_HASH typedef 
google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; #else typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; #endif // std::vector<set_type> dofs_aux_list(nthreads); // std::vector<allocator_type> allocators(nthreads); for (int i = 0; i < static_cast<int>(nthreads); i++) { #ifdef USE_GOOGLE_HASH dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer()); #else // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); #endif } // #pragma omp parallel for firstprivate(nelements, ElementalDofList) for (int i = 0; i < static_cast<int>(nelements); ++i) { auto it_elem = pElements.begin() + i; const IndexType this_thread_id = OpenMPUtils::ThisThread(); // Gets list of Dof involved on every element pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } ConditionsArrayType &pConditions = rModelPart.Conditions(); const int nconditions = static_cast<int>(pConditions.size()); #pragma omp parallel for firstprivate(nconditions, ElementalDofList) for (int i = 0; i < nconditions; ++i) { auto it_cond = pConditions.begin() + i; const IndexType this_thread_id = OpenMPUtils::ThisThread(); // Gets list of Dof involved on every element pScheme->GetDofList(*it_cond, ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } //here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5 * static_cast<double>(old_max)); while (new_max >= 1 && new_max != old_max) { // //just for debugging // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; // for (int i = 0; i < new_max; i++) // { // if (i + new_max < old_max) // { // std::cout << i << " - " << i + new_max << std::endl; // } // } // std::cout << "********************" << std::endl; #pragma 
omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end()); dofs_aux_list[i + new_max].clear(); } } old_max = new_max; new_max = ceil(0.5 * static_cast<double>(old_max)); } DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dofs_aux_list[0].size()); for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++) { Doftemp.push_back(*it); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; // Throws an execption if there are no Degrees of freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_destroy_lock(&mlock_array[i]); } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_init_lock(&mlock_array[i]); #endif // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if (BaseType::GetCalculateReactionsFlag()) { for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl << "Node : " << dof_iterator->Id() << std::endl << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." 
<< std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart &rModelPart) override { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). // // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType &pA, TSystemVectorPointerType &pDx, TSystemVectorPointerType &pb, ModelPart &rModelPart) override { KRATOS_TRY // boost::timer m_contruct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType 
pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType &A = *pA; TSystemVectorType &Dx = *pDx; TSystemVectorType &b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if (BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize, false); } // std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl; KRATOS_CATCH("") } inline void AssembleRowContributionFreeDofs( TSystemMatrixType &rA, const Matrix &rALocal, const IndexType i, const IndexType i_local, const Element::EquationIdVectorType &EquationId) { double *values_vector = rA.value_data().begin(); std::size_t *index1_vector = rA.index1_data().begin(); std::size_t *index2_vector = rA.index2_data().begin(); const std::size_t left_limit = index1_vector[i]; // Find the first entry // We iterate over the equation ids until we find the 
first equation id to be considered // We count in which component we find an ID std::size_t last_pos = 0; std::size_t last_found = 0; std::size_t counter = 0; for (std::size_t j = 0; j < EquationId.size(); ++j) { ++counter; const std::size_t j_global = EquationId[j]; if (j_global < BaseType::mEquationSystemSize) { last_pos = ForwardFind(j_global, left_limit, index2_vector); last_found = j_global; break; } } // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. If the number is below means that at we have several dofs free to be considered if (counter <= EquationId.size()) { #ifndef USE_LOCKS_IN_ASSEMBLY double &r_a = values_vector[last_pos]; const double &v_a = rALocal(i_local, counter - 1); #pragma omp atomic r_a += v_a; #else values_vector[last_pos] += rALocal(i_local, counter - 1); #endif // Now find all of the other entries std::size_t pos = 0; for (std::size_t j = counter; j < EquationId.size(); ++j) { std::size_t id_to_find = EquationId[j]; if (id_to_find < BaseType::mEquationSystemSize) { if (id_to_find > last_found) pos = ForwardFind(id_to_find, last_pos + 1, index2_vector); else if (id_to_find < last_found) pos = BackwardFind(id_to_find, last_pos - 1, index2_vector); else pos = last_pos; #ifndef USE_LOCKS_IN_ASSEMBLY double &r = values_vector[pos]; const double &v = rALocal(i_local, j); #pragma omp atomic r += v; #else values_vector[pos] += Alocal(i_local, j); #endif last_found = id_to_find; last_pos = pos; } } } } inline std::size_t ForwardFind(const std::size_t id_to_find, const std::size_t start, const std::size_t *index_vector) { std::size_t pos = start; while (id_to_find != index_vector[pos]) pos++; return pos; } inline std::size_t BackwardFind(const std::size_t id_to_find, const std::size_t start, const std::size_t *index_vector) { std::size_t pos = start; while (id_to_find != index_vector[pos]) pos--; return pos; } 
//************************************************************************** //************************************************************************** /** * @brief Applies the dirichlet conditions. This operation may be very heavy or completely * unexpensive depending on the implementation choosen and on how the System Matrix is built. * @details For explanation of how it works for a particular implementation the user * should refer to the particular Builder And Solver choosen * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { this->mDofSet = DofsArrayType(); if (this->mpReactionsVector != NULL) TSparseSpace::Clear((this->mpReactionsVector)); // this->mReactionsVector = TSystemVectorType(); this->mpLinearSystemSolver->Clear(); KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl; } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. 
* @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart &rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ void Assemble( TSystemMatrixType &A, TSystemVectorType &b, const LocalSystemMatrixType &LHS_Contribution, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId #ifdef _OPENMP , std::vector<omp_lock_t> &lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&lock_array[i_global]); #endif b[i_global] += RHS_Contribution(i_local); for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } #ifdef _OPENMP omp_unset_lock(&lock_array[i_global]); #endif } //note that assembly on fixed rows is not performed here } } //************************************************************************** virtual void ConstructMatrixStructureForFSI( typename TSchemeType::Pointer pScheme, TSystemMatrixType &A, ModelPart &rModelPart) { //filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); // Getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); const std::size_t equation_size = BaseType::mEquationSystemSize; 
#ifdef USE_GOOGLE_HASH std::vector<google::dense_hash_set<std::size_t>> indices(equation_size); const std::size_t empty_key = 2 * equation_size + 10; #else std::vector<std::unordered_set<std::size_t>> indices(equation_size); #endif #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { #ifdef USE_GOOGLE_HASH indices[iii].set_empty_key(empty_key); #else indices[iii].reserve(40); #endif } Element::EquationIdVectorType EquationId; ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if (itNode->Is(SOLID)) { const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol = 0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { unsigned int indexNode = i + 1; if (indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; firstCol += dimension; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); if (neigh_nodes_id == other_neigh_nodes_id) { 
EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } break; } } } } } else { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { firstCol += dimension; EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } } if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol = 0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { unsigned int indexNode = i + 1; if (indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; firstCol += dimension; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { 
unsigned int neigh_nodes_id = neighb_nodes[k].Id(); if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } break; } } } } } else { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { firstCol += dimension; EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } } for (std::size_t i = 0; i < EquationId.size(); i++) { if (EquationId[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[EquationId[i]]); #endif auto &row_indices = indices[EquationId[i]]; for (auto it = EquationId.begin(); it != EquationId.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[EquationId[i]]); #endif } } } Element::EquationIdVectorType ids(3, 0); #pragma omp parallel for firstprivate(nconditions, ids) for (int iii = 0; iii < nconditions; iii++) { typename ConditionsArrayType::iterator i_condition = cond_begin + iii; pScheme->EquationId(*i_condition, ids, CurrentProcessInfo); for (std::size_t i = 0; i < ids.size(); i++) { if (ids[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[ids[i]]); #endif auto &row_indices = indices[ids[i]]; for (auto it = ids.begin(); it != ids.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[ids[i]]); #endif } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < 
indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double *Avalues = A.value_data().begin(); std::size_t *Arow_indices = A.index1_data().begin(); std::size_t *Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } void AssembleLHS( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ #ifdef _OPENMP std::vector<omp_lock_t> mlock_array; #endif ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate) { std::vector<std::size_t>::iterator i = v.begin(); 
std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void AssembleRHS( TSystemVectorType &b, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } else //fixed dof { double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private 
Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedEliminationBuilderAndSolverForFSI */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
GB_binop__gt_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint32) // A*D function (colscale): GB (_AxD__gt_uint32) // D*A function (rowscale): GB (_DxB__gt_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__gt_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__gt_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint32) // C=scalar+B GB (_bind1st__gt_uint32) // C=scalar+B' GB (_bind1st_tran__gt_uint32) // C=A+scalar GB (_bind2nd__gt_uint32) // C=A'+scalar GB (_bind2nd_tran__gt_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT32 || GxB_NO_GT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__gt_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__gt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
image-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickCore Image View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/MagickCore.h" #include "MagickCore/exception-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Typedef declarations. */ struct _ImageView { char *description; RectangleInfo extent; Image *image; CacheView *view; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageView() makes a copy of the specified image view. 
%
%  The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    BUG FIX: the image pointer was never copied, leaving clone_view->image
    NULL (ResetMagickMemory zeroes the struct), so iterator methods that
    dereference view->image (e.g. DuplexTransferImageViewIterator) would
    crash on a cloned view.  A shallow copy is correct: the view does not
    own the image (DestroyImageView does not destroy it).
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with an image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
% */ MagickExport ImageView *DestroyImageView(ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); if (image_view->description != (char *) NULL) image_view->description=DestroyString(image_view->description); image_view->view=DestroyCacheView(image_view->view); image_view->exception=DestroyExceptionInfo(image_view->exception); image_view->signature=(~MagickCoreSignature); image_view=(ImageView *) RelinquishMagickMemory(image_view); return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferImageViewIterator() iterates over three image views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel extent is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination image view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % The callback signature is: % % MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source, % const ImageView *duplex,ImageView *destination,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferImageViewIterator method is: % % MagickBooleanType DuplexTransferImageViewIterator(ImageView *source, % ImageView *duplex,ImageView *destination, % DuplexTransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. 
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict duplex_pixels, *magick_restrict pixels; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { 
status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DuplexTransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticMetacontent() returns the image view authentic % meta-content. % % The format of the GetImageViewAuthenticPixels method is: % % void *GetImageViewAuthenticMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport void *GetImageViewAuthenticMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. % % The format of the GetImageViewAuthenticPixels method is: % % Quantum *GetImageViewAuthenticPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
%
*/
MagickExport Quantum *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewAuthenticPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w E x c e p t i o n                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewException() returns the severity, reason, and description of
%  any error that occurs when utilizing an image view.
%
%  The format of the GetImageViewException method is:
%
%      char *GetImageViewException(const ImageView *image_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  /* 2*MagickPathExtent: room for reason + " (" + description + ")" */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *description='\0';
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MagickPathExtent);
  if (image_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  /* NOTE(review): caller owns the returned string -- free it when done */
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w E x t e n t                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewExtent() returns the image view extent.
%
%  The format of the GetImageViewExtent method is:
%
%      RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(image_view->extent);  /* returned by value */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewImage() returns the image associated with the image view.
%
%  The format of the GetImageViewImage method is:
%
%      Image *GetImageViewImage(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(image_view->image);  /* borrowed reference; view retains ownership */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewIterator() iterates over the image view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetImageViewIterator method is:
%
%      MagickBooleanType GetImageViewIterator(ImageView *source,
%        GetImageViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    /* a failure on any scanline short-circuits the remaining iterations */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualMetacontent() returns the image view virtual
%  meta-content.
%
%  The format of the GetImageViewVirtualMetacontent method is:
%
%      const void *GetImageViewVirtualMetacontent(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualMetacontent(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l P i x e l s                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualPixels() returns the image view virtual pixels.
%
%  The format of the GetImageViewVirtualPixels method is:
%
%      const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageView() returns MagickTrue if the parameter is verified as an image
%  view object.
%
%  The format of the IsImageView method is:
%
%      MagickBooleanType IsImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  /* no asserts here: this predicate must be safe on arbitrary pointers */
  if (image_view == (const ImageView *) NULL)
    return(MagickFalse);
  if (image_view->signature != MagickCoreSignature)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w I m a g e V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageView() returns an image view required for all other methods in the
%  Image View API.
%
%  The format of the NewImageView method is:
%
%      ImageView *NewImageView(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
  if (image_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  /* the default extent covers the whole image canvas */
  image_view->extent.width=image->columns;
  image_view->extent.height=image->rows;
  image_view->extent.x=0;
  image_view->extent.y=0;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w I m a g e V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageViewRegion() returns an image view required for all other methods
%  in the Image View API.
%
%  The format of the NewImageViewRegion method is:
%
%      ImageView *NewImageViewRegion(Image *image,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a extent of
%      pixel_wands view.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,
  ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
  if (image_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /*
    Assign the image BEFORE acquiring the cache view: the original code
    called AcquireVirtualCacheView(image_view->image,...) while
    image_view->image was still NULL (the struct was just zeroed), so the
    cache view was created on a NULL image.  Mirror NewImageView()'s order.
  */
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  image_view->extent.width=width;
  image_view->extent.height=height;
  image_view->extent.x=x;
  image_view->extent.y=y;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e V i e w D e s c r i p t i o n                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewDescription() associates a description with an image view.
%
%  The format of the SetImageViewDescription method is:
%
%      void SetImageViewDescription(ImageView *image_view,
%        const char *description)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
  const char *description)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /*
    Release any prior description first (NewImageView() installs
    "ImageView"); the original unconditional assignment leaked the old
    ConstantString() allocation on every call.
  */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewIterator() iterates over the image view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initiallly
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetImageViewIterator method is:
%
%      MagickBooleanType SetImageViewIterator(ImageView *destination,
%        SetImageViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the image view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* writes require a DirectClass (uncompressed) pixel cache */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SetImageViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r I m a g e V i e w I t e r a t o r                         %
% % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferImageViewIterator() iterates over two image views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. % However, the destination image view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const ImageView *source, % ImageView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict pixels; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransferImageViewIterator) 
#endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateImageViewIterator method is: % % MagickBooleanType UpdateImageViewIterator(ImageView *source, % UpdateImageViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Capture the sync result in a local: the original assigned it straight
      to status, which silently discarded a failure reported by the update
      callback whenever the subsequent sync succeeded.  This now matches the
      Set/Transfer/DuplexTransfer iterators.
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/* ==== mlp_mnist_f32.c -- a second, unrelated translation unit (LIBXSMM MLP/MNIST sample) was concatenated below this point ==== */
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif

/* compare against the reference (naive) implementation after each pass */
#define TEST_ACCURACY

/* include c-based dnn library */
#include "../common/dnn_common.h"
#include "../common/mnist.h"

/*
  Fill `buf` with `size` floats: all ones when initOne != 0, uniform [0,1)
  random values when initPos != 0, otherwise small values centered near zero
  (0.05 - rng/10).  Uses the libxsmm RNG so runs are reproducible.
*/
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf(buf, size);
  for (i = 0; i < (int)size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
  }
}

/* bit flags describing which element-wise ops are fused into the FC kernels */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,
  MY_ELTWISE_FUSE_RELU = 2,
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;

/* bit flags selecting forward / backward-data / backward-weight passes */
typedef enum my_pass {
  MY_PASS_FWD   = 1,
  MY_PASS_BWD_D = 2,
  MY_PASS_BWD_W = 4,
  MY_PASS_BWD   = 6
} my_pass;

/* SGD optimizer configuration (weight shape blocking + learning rate) */
typedef struct my_opt_config {
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  float lr;
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_opt_config;

/* softmax forward configuration */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_smax_fwd_config;

/* softmax backward configuration */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  float loss_weight;
  libxsmm_barrier* barrier;
} my_smax_bwd_config;

/* fully-connected forward configuration: blocking, fusion, JIT'ed BRGEMMs */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint fwd_bf;
  libxsmm_blasint fwd_2d_blocking;
  libxsmm_blasint fwd_col_teams;
  libxsmm_blasint fwd_row_teams;
  size_t scratch_size;
  libxsmm_barrier* barrier;
  libxsmm_smmfunction_reducebatch_strd gemm_fwd;
  libxsmm_smmfunction_reducebatch_strd gemm_fwd2;
} my_fc_fwd_config;

/* fully-connected backward (data + weight-update) configuration */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint bwd_bf;
  libxsmm_blasint bwd_2d_blocking;
  libxsmm_blasint bwd_col_teams;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint upd_bf;
  libxsmm_blasint upd_2d_blocking;
  libxsmm_blasint upd_col_teams;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint ifm_subtasks;
  libxsmm_blasint ofm_subtasks;
  size_t
scratch_size;
  libxsmm_barrier* barrier;
  libxsmm_smmfunction_reducebatch_strd gemm_bwd;
  libxsmm_smmfunction_reducebatch_strd gemm_bwd2;
  libxsmm_smmfunction_reducebatch_strd gemm_upd;
  libxsmm_smmfunction_reducebatch_strd gemm_upd2;
  libxsmm_meltwfunction_unary norm_to_normT_kernel;
} my_fc_bwd_config;

/*
  Build the forward-pass configuration: record the problem/blocking sizes,
  pick a parallelization strategy, and JIT the two strided batch-reduce
  GEMMs (beta=1 accumulate and beta=0 overwrite variants).
*/
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_fwd_config res;
  libxsmm_blasint lda = bk;
  libxsmm_blasint ldb = bc;
  libxsmm_blasint ldc = bk;
  float alpha = 1.0f;
  float beta = 1.0f;
  float zerobeta = 0.0f;

  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;

  /* setup parallelization strategy */
  /* the 2x8 team split is tuned for a 16-thread run; otherwise fall back to flat */
  if (threads == 16) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 2;
    res.fwd_row_teams = 8;
  } else {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 0;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 1;
  }
#if 0
  res.fwd_bf = atoi(getenv("FWD_BF"));
  res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING"));
  res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS"));
  res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS"));
#endif

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* TPP creation */
  res.gemm_fwd = libxsmm_smmdispatch_reducebatch_strd(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(float), res.bc*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &beta, NULL, NULL);
  if ( res.gemm_fwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_fwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(float), res.bc*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL);
  if ( res.gemm_fwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
    exit(-1);
  }

  /* init scratch */
  res.scratch_size = 0;

  return res;
}

/*
  Build the backward-pass configuration: JIT the backward-data BRGEMMs, the
  weight transpose (norm->normT) kernel, and the weight-update BRGEMMs
  ('N','T' flags), then size the scratch for transposed activations/weights.
*/
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_bwd_config res;
  libxsmm_blasint lda = bc;
  libxsmm_blasint ldb = bk;
  libxsmm_blasint ldc = bc;
  float alpha = 1.0f;
  float beta = 1.0f;
  float zerobeta = 0.0f;
  int updflags = LIBXSMM_GEMM_FLAGS( 'N', 'T' );
  libxsmm_blasint updM;
  libxsmm_blasint updN;
  libxsmm_blasint ldaT = bk;

  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;

  /* setup parallelization strategy (2x8 teams tuned for 16 threads) */
  if (threads == 16) {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 1;
    res.bwd_col_teams = 2;
    res.bwd_row_teams = 8;
    res.upd_bf = 1;
    res.upd_2d_blocking = 0;
    res.upd_col_teams = 1;
    res.upd_row_teams = 1;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  } else {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 0;
    res.bwd_col_teams = 1;
    res.bwd_row_teams = 1;
    res.upd_bf = 1;
    res.upd_2d_blocking = 0;
    res.upd_col_teams = 1;
    res.upd_row_teams = 1;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  }
#if 0
  res.bwd_bf = atoi(getenv("BWD_BF"));
  res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
  res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS"));
  res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
  res.upd_bf = atoi(getenv("UPD_BF"));
  res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
  res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS"));
  res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
  res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
  res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif

  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);

  /* TPP creation */
  /* BWD GEMM */
  res.gemm_bwd = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &beta, NULL, NULL);
  if ( res.gemm_bwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_bwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL);
  if ( res.gemm_bwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
    exit(-1);
  }
  res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &ldaT, &lda, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT);
  if ( res.norm_to_normT_kernel == NULL ) {
    fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n");
    exit(-1);
  }

  /* UPD GEMM */
  lda = res.bk;
  ldb = res.bc;
  ldc = res.bk;
  updM = res.bk/res.ofm_subtasks;
  updN = res.bc/res.ifm_subtasks;
  res.gemm_upd = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn, res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &beta, &updflags, NULL);
  if ( res.gemm_upd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_upd2 = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn, res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &zerobeta, &updflags, NULL);
  if ( res.gemm_upd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. 
Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = sizeof(float) * ( (((size_t)res.C + (size_t)res.K) * (size_t)res.N) + ((size_t)res.C * (size_t)res.K) ); return res; } my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) { my_opt_config res; /* setting up some handle values */ res.C = C; res.K = K; res.bc = bc; res.bk = bk; res.threads = threads; res.lr = lr; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) { my_smax_fwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) { my_smax_bwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; res.loss_weight = loss_weight; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } void my_fc_fwd_exec( my_fc_fwd_config cfg, const float* wt_ptr, const float* in_act_ptr, float* out_act_ptr, const float* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch ) { const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksOFm * 
nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* loop variables */ libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0, mb2 = 0, ofm2 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0; libxsmm_blasint my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0; libxsmm_blasint my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc); LIBXSMM_VLA_DECL(4, const float, filter, wt_ptr, nBlocksIFm, cfg.bc, cfg.bk); LIBXSMM_VLA_DECL(2, const float, bias, bias_ptr, cfg.bk); LIBXSMM_VLA_DECL(4, unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk); unsigned long long blocks = nBlocksIFm; libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1; LIBXSMM_UNUSED( scratch ); BF = cfg.fwd_bf; CB_BLOCKS = nBlocksIFm/BF; blocks = CB_BLOCKS; col_teams = cfg.fwd_col_teams; row_teams = cfg.fwd_row_teams; my_row_id = ltid % row_teams; my_col_id = ltid / row_teams; N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, col_teams); M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams); my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksMB); my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksMB); my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm); my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); if (cfg.fwd_2d_blocking == 1) { if (BF > 1) { for (ifm1 = 0; ifm1 < BF; ++ifm1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (mb1 = my_N_start; mb1 < 
my_N_end; ++mb1) { /* Initialize output slice */ if ( ifm1 == 0 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } } else { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0; } } } } /* BRGEMM */ cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); /* apply post BRGEMM fusion */ if ( ifm1 == BF-1 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = (l_cur_out > (float)0) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } } } else { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } else { cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } /* post GEMM fusion */ if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = ( l_cur_out > (float)0 ) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } } else { if (BF > 1) { for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) { for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; /* Initialize output slice */ if ( ifm1 == 0 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } } else { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0; } } } } /* BRGEMM */ cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); /* post GEMM fusion */ if ( ifm1 == BF-1 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = (l_cur_out > (float)0) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } } else { for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } else { cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } /* post GEMM fusion */ if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = ( l_cur_out > (float)0 ) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_fc_bwd_exec( my_fc_bwd_config cfg, const float* wt_ptr, float* din_act_ptr, float* dout_act_ptr, float* dwt_ptr, const float* in_act_ptr, float* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch ) { /* here we assume that input and output blocking is similar */ const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bk = cfg.bk; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint nBlocksIFm = cfg.C / bc; const libxsmm_blasint nBlocksOFm = cfg.K / bk; const libxsmm_blasint nBlocksMB = cfg.N / bn; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work; const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work; libxsmm_blasint mb1ofm1; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint dbias_work = nBlocksOFm; /* compute chunk size */ const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work; const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? 
((ltid + 1) * dbias_chunksize) : dbias_work; /* loop variables */ libxsmm_blasint ofm1 = 0, mb1 = 0, ofm2 = 0, mb2 = 0; float *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? ((float*)scratch)+(cfg.C*cfg.K) : dout_act_ptr); LIBXSMM_VLA_DECL(4, const float, doutput_orig, dout_act_ptr, nBlocksOFm, bn, bk); LIBXSMM_VLA_DECL(4, float, doutput, grad_output_ptr, nBlocksOFm, bn, bk); LIBXSMM_VLA_DECL(2, float, dbias, dbias_ptr, cfg.bk); LIBXSMM_VLA_DECL(4, const unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk); libxsmm_meltw_unary_param trans_param; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); l_cur_out = (LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) != 0) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } /* wait for eltwise to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) = 0.0f; } for ( mb1 = 0; mb1 < nBlocksMB; ++mb1 ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) += LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); } } } } /* wait for eltwise to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ) { const libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm; /* compute chunk size */ const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work; const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? 
((ltid + 1) * transpose_chunksize) : transpose_work; /* loop variables */ libxsmm_blasint ifm1 = 0, ifm2 = 0, ifm1ofm1 = 0, mb1ifm1 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(4, const float, filter, wt_ptr, nBlocksIFm, bc, bk); LIBXSMM_VLA_DECL(4, float, dinput, din_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(4, float, filter_tr, (float*)scratch, nBlocksOFm, bk, bc); unsigned long long blocks = nBlocksOFm; libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1; BF = cfg.bwd_bf; KB_BLOCKS = nBlocksOFm/BF; blocks = KB_BLOCKS; if (use_2d_blocking == 1) { col_teams = cfg.bwd_col_teams; row_teams = cfg.bwd_row_teams; my_row_id = ltid % row_teams; my_col_id = ltid / row_teams; N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, col_teams); M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, row_teams); my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksMB); my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksMB); my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksIFm); my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksIFm); } /* transpose weight */ for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / nBlocksIFm; ifm1 = ifm1ofm1 % nBlocksIFm; trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1, 0, 0, nBlocksOFm, bk, bc); cfg.norm_to_normT_kernel(&trans_param); } /* wait for transpose to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); if (use_2d_blocking == 1) { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { /* Initialize intermediate f32 tensor */ if ( ofm1 == 0 ) { for ( mb2 = 0; mb2 < bn; ++mb2 ) { for 
( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0; } } } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } else { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, 0, 0, 0, nBlocksOFm, bk, bc), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } else { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; /* Initialize intermediate f32 tensor */ if ( ofm1 == 0 ) { for ( mb2 = 0; mb2 < bn; ++mb2 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0; } } } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } else { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, 0, 0, 0, nBlocksOFm, bk, bc ), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } libxsmm_barrier_wait(cfg.barrier, ltid); } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { /* number of tasks that could be run in parallel */ const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 
1 : cfg.ofm_subtasks; const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks; const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks; const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks; const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks; const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks; const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks; /* 2D blocking parameters */ libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? 
((ltid + 1) * chunksize) : work; libxsmm_blasint BF = cfg.upd_bf; /* loop variables */ libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, ii = 0, jj = 0; /* Batch reduce related variables */ unsigned long long blocks = nBlocksMB/BF; LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(4, float, dfilter, dwt_ptr, nBlocksIFm, bc, bk); if (use_2d_blocking == 1) { col_teams = cfg.upd_col_teams; row_teams = cfg.upd_row_teams; my_row_id = ltid % row_teams; my_col_id = ltid / row_teams; N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, col_teams); M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams); my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksIFm); my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksIFm); my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm); my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm); } if (use_2d_blocking == 1) { if (BF == 1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { cfg.gemm_upd2(&LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, 0, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks); } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* initialize current work task to zero */ if (bfn == 0) { for (ii = 0; ii<bc; ii++) { for (jj = 0; jj<bk; jj++) { LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ii, jj, nBlocksIFm, bc, bk) = (float)0; } } } cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, 0, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks); } } } } } else { if (BF == 1) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) 
{ ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd2( &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); } } else { for (bfn = 0; bfn < BF; bfn++) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { for (ii = 0; ii<bbc; ii++) { for (jj = 0; jj<bbk; jj++) { LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc+ii, ofm2*bbk+jj, nBlocksIFm, bc, bk) = (float)0; } } } cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); } } } } libxsmm_barrier_wait(cfg.barrier, ltid); } } void my_opt_exec( my_opt_config cfg, float* wt_ptr, const float* delwt_ptr, int start_tid, int my_tid, void* scratch ) { /* loop counters */ libxsmm_blasint i; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the filters */ const libxsmm_blasint work = cfg.C * cfg.K; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? 
((ltid + 1) * chunksize) : work; libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */ __m512 vlr = _mm512_set1_ps( cfg.lr ); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = thr_begin; i < thr_begin+iv; i+=16 ) { _mm512_storeu_ps( wt_ptr+i, _mm512_sub_ps( _mm512_loadu_ps( wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_loadu_ps( delwt_ptr + i ) ) ) ) ; } for ( i = thr_begin+iv; i < thr_end; ++i ) { wt_ptr[i] = wt_ptr[i] - (cfg.lr*delwt_ptr[i]); } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_fwd_exec( my_smax_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? 
((ltid + 1) * n_chunksize) : n_work; LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { float max = FLT_MIN; float sum_of_exp = 0.0f; img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) { max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } } /* sum exp over outputs */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) ); sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } /* scale output */ sum_of_exp = 1.0f/sum_of_exp; for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp; } } } libxsmm_barrier_wait( cfg.barrier, ltid ); /* calculate loss single threaded */ if ( ltid == 0 ) { (*loss) = 0.0f; for ( img1 = 0; img1 < Bn; ++img1 ) { for ( img2 = 0; img2 <bn; ++img2 ) { libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ); libxsmm_blasint ifm1b = ifm/bc; libxsmm_blasint ifm2b = ifm%bc; float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? 
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN; *loss += LIBXSMM_LOGF( val ); } } *loss = ((-1.0f)*(*loss))/cfg.N; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_bwd_exec( my_smax_bwd_config cfg, float* delin_act_ptr, const float* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0f/cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? 
((ltid + 1) * n_chunksize) : n_work; LIBXSMM_VLA_DECL(4, const float, output, out_act_ptr, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, delin_act_ptr, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { if ( (ifm1*Bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait( cfg.barrier, ltid ); } int main(int argc, char* argv[]) { float **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; float **bias_libxsmm, **delbias_libxsmm; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config* my_fc_fwd; my_fc_bwd_config* my_fc_bwd; my_opt_config* my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; void* scratch = NULL; size_t scratch_size = 0; /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 256; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP, 'U', WU */ int bn = 32; int bk = 32; int bc = 32; int *C; /* number of input feature maps, "C" */ int num_layers = 0; #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif 
  unsigned long long l_start, l_end;
  double l_total = 0.0;
  double gflop = 0.0;
  int i, j;
  double fil_size = 0.0;
  double act_size = 0.0;
  float lr = 0.1f;           /* SGD learning rate passed to setup_my_opt */
  float loss_weight = 1.0f;  /* scales softmax loss/gradient */
  float loss = 0.0;
  libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
  libxsmm_matdiff_clear(&norms_fwd);
  libxsmm_matdiff_clear(&norms_bwd);
  libxsmm_matdiff_clear(&norms_upd);
  libxsmm_matdiff_clear(&diff);

  if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
    printf("Usage: %s iters MB bn bk bc C1 C2 ... CN\n", argv[0]);
    return 0;
  }
  libxsmm_rng_set_seed(1);

  /* reading new values from cli */
  i = 1;
  /* argv[1..5] are iters/MB/bn/bk/bc; argv[6..] are the per-layer channel
   * counts, hence num_layers = argc - 7 (NOTE(review): this counts argc-6
   * channel arguments as argc-7+1 layers; verify off-by-one intent) */
  num_layers = argc - 7;
  if (argc > i) iters = atoi(argv[i++]);
  if (argc > i) MB = atoi(argv[i++]);
  if (argc > i) bn = atoi(argv[i++]);
  if (argc > i) bk = atoi(argv[i++]);
  if (argc > i) bc = atoi(argv[i++]);
  /* allocate the number of channels buffer */
  if ( num_layers < 1 ) {
    printf("Usage: %s iters MB bn bk bc C1 C2 ... CN\n", argv[0]);
    return 0;
  }
  C = (int*)malloc((num_layers+2)*sizeof(int));
  for (j = 0 ; i < argc; ++i, ++j ) {
    C[j] = atoi(argv[i]);
  }
  /* handle softmax config: softmax layer keeps the last layer's width */
  C[num_layers+1] = C[num_layers];

#if defined(__SSE3__)
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif

  /* print some summary */
  printf("##########################################\n");
  printf("#          Setting Up (Common)           #\n");
  printf("##########################################\n");
  printf("PARAMS: N:%d\n", MB);
  printf("PARAMS: Layers: %d\n", num_layers);
  printf("PARAMS: ITERS:%d", iters);
  printf("  Threads:%d\n", nThreads);
  for (i = 0; i < num_layers; ++i ) {
    if (i == 0) {
      act_size += (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0);
      printf("SIZE Activations  %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0) );
    }
    act_size += (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0);
    fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0);
    printf("SIZE Filter       %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) );
    printf("SIZE Activations  %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0) );
  }
  act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
  printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) );
  printf("\nTOTAL SIZE Activations:    %10.2f MiB\n", act_size );
  printf("TOTAL SIZE Filter:         %10.2f MiB\n", fil_size );
  printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
  printf("TOTAL SIZE delFilter:      %10.2f MiB\n", fil_size );
  printf("TOTAL SIZE MLP:            %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );

  /* allocate data */
  /* +2 because of the softmax layer */
  act_libxsmm    = (float**)malloc( (num_layers+2)*sizeof(float*) );
  delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
  for ( i = 0 ; i < num_layers+2; ++i ) {
    act_libxsmm[i]                = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
    /* softmax has no incoming gradients */
    if ( i < num_layers+1 ) {
      delact_libxsmm[i]           = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
    }
  }
  fil_libxsmm    = (float**)malloc( num_layers*sizeof(float*) );
  delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    fil_libxsmm[i]                = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
    delfil_libxsmm[i]             = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
  }
  bias_libxsmm    = (float**)malloc( num_layers*sizeof(float*) );
  delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    bias_libxsmm[i]               = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
    delbias_libxsmm[i]            = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
  }
  relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    relumask_libxsmm[i]           = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
  }
  /* NOTE(review): label_libxsmm is allocated and zeroed below but training and
   * testing use train_label/test_label from load_mnist; it appears unused --
   * confirm before removing */
  label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);

  /* init data */
  for ( i = 0 ; i < num_layers+2; ++i ) {
    my_init_buf( act_libxsmm[i], MB*C[i], 0, 0 );
  }
  for ( i = 0 ; i < num_layers+1; ++i ) {
    my_init_buf( delact_libxsmm[i], MB*C[i], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( bias_libxsmm[i], C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( delbias_libxsmm[i], C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
  }
  zero_buf_int32( label_libxsmm, MB );

  printf("\n");
  printf("##########################################\n");
  printf("#      Setting Up  (custom-Storage)      #\n");
  printf("##########################################\n");

  /* allocating handles */
  my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) );
  my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) );
  my_opt    = (my_opt_config*)    malloc( num_layers*sizeof(my_opt_config) );

  /* setting up handles + scratch */
  for ( i = 0; i < num_layers; ++i ) {
    /* MNIST Specific where everywhere we use relu act except the last layer */
    if ( i < num_layers -1) {
      my_fuse = MY_ELTWISE_FUSE_RELU;
    } else {
      my_fuse = MY_ELTWISE_FUSE_NONE;
    }
    /* block sizes fall back to the full dimension when it does not divide evenly */
    my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
                                   (C[i  ] % bc == 0) ? bc : C[i  ],
                                   (C[i+1] % bk == 0) ? bk : C[i+1],
                                   nThreads, my_fuse);
    my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
                                   (C[i  ] % bc == 0) ? bc : C[i  ],
                                   (C[i+1] % bk == 0) ? bk : C[i+1],
                                   nThreads, my_fuse);
    my_opt[i]    = setup_my_opt( C[i], C[i+1],
                                 (C[i  ] % bc == 0) ? bc : C[i  ],
                                 (C[i+1] % bk == 0) ? bk : C[i+1],
                                 nThreads, lr );

    /* let's allocate and bind scratch: keep one buffer big enough for any layer/pass */
    if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) {
      size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size );
      if ( alloc_size > scratch_size ) {
        if ( scratch != NULL ) libxsmm_free( scratch );
        scratch_size = alloc_size;
        scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
        my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
      }
    }
  }

  /* softmax+loss is treated as N+1 layer */
  my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
                                   (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
                                   nThreads );
  my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
                                   (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
                                   nThreads, loss_weight );

  if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) {
    size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size );
    if ( alloc_size > scratch_size ) {
      if ( scratch != NULL ) libxsmm_free( scratch );
      scratch_size = alloc_size;
      scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
      my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
    }
  }

  /* Reading in the MNIST dataset */
  int n_batches  = NUM_TRAIN/MB, batch_id = 0;
  int n_epochs = iters, epoch_id = 0;
  float *input_acts = (float*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(float), 2097152);

  /* Read in input data */
  char *train_image_path = "../mlpdriver/mnist_data/train-images.idx3-ubyte";
  char *train_label_path = "../mlpdriver/mnist_data/train-labels.idx1-ubyte";
  char *test_image_path = "../mlpdriver/mnist_data/t10k-images.idx3-ubyte";
  char *test_label_path = "../mlpdriver/mnist_data/t10k-labels.idx1-ubyte";
  load_mnist(train_image_path, train_label_path, test_image_path, test_label_path);

  /* Format the input layer in NCNC blocked format */
  int  _i, _j;
  for (_i = 0; _i < n_batches*MB; _i++) {
    for (_j = 0; _j < C[0]; _j++) {
      float val = (float) train_image[_i][_j];
      int batchid = _i/MB;
      int mb = _i % MB;
      int _bn = (MB % bn == 0) ? bn : MB;
      int _bc = (C[0] % bc == 0) ? bc : C[0];
      /* address of element (mb, _j) inside batch `batchid` in (N/bn, C/bc, bn, bc) layout */
      float *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
      *cur_pos = val;
    }
  }

  printf("###########################################\n");
  printf("# Training MNIST with %d training samples #\n", n_batches*MB);
  printf("###########################################\n");

  l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
#   pragma omp parallel private(i,j,epoch_id,batch_id)
#endif
  {
#if defined(_OPENMP)
    const int tid = omp_get_thread_num();
#else
    const int tid = 0;
#endif
    /* training loop: fwd through all layers, softmax loss, bwd+opt in reverse */
    for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) {
      for (batch_id = 0; batch_id < n_batches; batch_id++) {
        for ( i = 0; i < num_layers; ++i) {
          float *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i];
          my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1],
                          bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch );
        }
        my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1],
                          train_label + batch_id * MB, &loss, 0, tid, scratch );
        if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) {
          printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss);
        }
        my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1],
                          train_label + batch_id * MB, 0, tid, scratch );
        for ( i = num_layers-1; i > 0; --i) {
          my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1],
                          delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i],
                          MY_PASS_BWD, 0, tid, scratch );
          my_opt_exec( my_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid, scratch );
        }
        /* first layer: weight-update pass only (no gradient w.r.t. the raw input needed) */
        my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1],
                        delfil_libxsmm[0], input_acts + batch_id * MB * C[0], delbias_libxsmm[0],
                        relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
        my_opt_exec( my_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch );
      }
    }
  }
  l_end = libxsmm_timer_tick();
  l_total = libxsmm_timer_duration(l_start, l_end);

  /* FLOP model: 6 MACs/elem for full bwd layers, 4 for the weight-update-only first layer */
  gflop = 0.0;
  for ( i = num_layers-1; i > 0; --i) {
    gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0);
  }
  gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0);
  printf("GFLOP  = %.5g\n", gflop/(double)((double)n_epochs *(double)n_batches));
  printf("fp time = %.5g\n", ((double)(l_total/((double)n_epochs *(double)n_batches))));
  printf("GFLOPS  = %.5g\n", gflop/l_total);
  printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
  for ( i = 0; i < num_layers; ++i ) {
    printf("%i,", C[i] );
  }
  printf("%f,%f\n", ((double)(l_total/((double)n_epochs *(double)n_batches))), gflop/l_total);

#ifdef TEST_ACCURACY
  /* Test accuracy */
  n_batches  = NUM_TEST/MB;

  /* reformat the test images into the same NCNC blocked layout */
  for (_i = 0; _i < n_batches * MB; _i++) {
    for (_j = 0; _j < C[0]; _j++) {
      float val = (float) test_image[_i][_j];
      int batchid = _i/MB;
      int mb = _i % MB;
      int _bn = (MB % bn == 0) ? bn : MB;
      int _bc = (C[0] % bc == 0) ? bc : C[0];
      float *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
      *cur_pos = val;
    }
  }

  n_batches  = NUM_TEST/MB;
  unsigned int hits = 0;
  unsigned int samples = 0;

#if defined(_OPENMP)
#   pragma omp parallel private(i,j,batch_id)
#endif
  {
#if defined(_OPENMP)
    const int tid = omp_get_thread_num();
#else
    const int tid = 0;
#endif
    for (batch_id = 0; batch_id < n_batches; batch_id++) {
      for ( i = 0; i < num_layers; ++i) {
        float *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i];
        my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1],
                        bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch );
      }
      my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1],
                        test_label + batch_id * MB, &loss, 0, tid, scratch );
      /* argmax over the 10 class scores; NOTE(review): the literal 10 assumes
       * the MNIST class count and an un-blocked view of the softmax output */
      if (tid == 0) {
        for (_i = 0; _i < MB; _i++) {
          int label =  *(test_label + batch_id * MB + _i);
          int max_id = 0;
          float max_val = 0.0;
          max_val = *(act_libxsmm[num_layers+1] + _i * 10);
          float sum = max_val;
          /* Find predicted label */
          for (_j = 1; _j < 10; _j++) {
            float val = *(act_libxsmm[num_layers+1] + _i * 10 + _j);
            sum += val;
            if (val > max_val) {
              max_id = _j;
              max_val = val;
            }
          }
          /* Compare with true label */
          if (max_id == label) {
            hits++;
          }
          samples++;
        }
      }
#pragma omp barrier
    }
  }
  printf("Accuracy is %f %% (%d test samples)\n", (1.0*hits)/(1.0*samples)*100.0, samples);
#endif

  /* deallocate data */
  if ( scratch != NULL ) {
    libxsmm_free(scratch);
  }
  for ( i = 0; i < num_layers; ++i ) {
    if ( i == 0 ) {
      libxsmm_free(act_libxsmm[i]);
      libxsmm_free(delact_libxsmm[i]);
    }
    libxsmm_free(act_libxsmm[i+1]);
    libxsmm_free(delact_libxsmm[i+1]);
    libxsmm_free(fil_libxsmm[i]);
    libxsmm_free(delfil_libxsmm[i]);
    libxsmm_free(bias_libxsmm[i]);
    libxsmm_free(delbias_libxsmm[i]);
    libxsmm_free(relumask_libxsmm[i]);
  }
  libxsmm_free(act_libxsmm[num_layers+1]);
  libxsmm_free(label_libxsmm);
  libxsmm_free(input_acts);

  free( my_opt );
  free( my_fc_fwd );
  free( my_fc_bwd );

  free( act_libxsmm );
  free( delact_libxsmm );
  free( fil_libxsmm );
  free( delfil_libxsmm );
  free( bias_libxsmm );
  free( delbias_libxsmm );
  free( relumask_libxsmm );

  free( C );

  /* some empty lines at the end */
  printf("\n\n\n");

  return 0;
}
U.h
/*
 * Author: Salvatore Mandra (salvatore.mandra@nasa.gov)
 *
 * Copyright © 2021, United States Government, as represented by the
 * Administrator of the National Aeronautics and Space Administration. All
 * rights reserved.
 *
 * The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed
 * under the Apache License, Version 2.0 (the "License"); you may not use this
 * file except in compliance with the License. You may obtain a copy of the
 * License at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

#ifndef HYBRIDQ__U_HPP
#define HYBRIDQ__U_HPP

#include "pack.h"
#include "utils.h"

namespace hybridq::U {

// Apply a 2^n x 2^n gate matrix U (given as interleaved re/im pairs in U_ptr)
// to the state split into separate real/imaginary arrays, where the target
// qubit positions are compile-time parameter-pack arguments.
// Returns 0 on success, 1 on invalid input (misaligned pointers or positions
// below log2_pack_size — those positions live inside a SIMD pack and are not
// handled by this overload).
template <std::size_t log2_pack_size, typename float_type, typename... Positions>
int apply(float_type *psi_re_ptr, float_type *psi_im_ptr,
          const float_type *U_ptr, const std::size_t state_size_ptr,
          Positions &&... pos) {
  // Check if pointer are correctly aligned (32-byte SIMD alignment)
  if (reinterpret_cast<std::size_t>(psi_re_ptr) % 32 or
      reinterpret_cast<std::size_t>(psi_im_ptr) % 32)
    return 1;

  // Get size of pack
  static const std::size_t pack_size = 1uL << log2_pack_size;

  // Get pack_type
  using pack_type = typename __pack__<float_type, pack_size>::value_type;

  // Get number of positions
  static const std::size_t n_pos = sizeof...(pos);

  // Check that all positions are positive numbers
  if (not[](auto &&... x) { return ((x >= 0) & ...); }(pos...)) return 1;

  // Check that all positions are above log2_pack_size
  if ([](auto &&... x) {
        return ((static_cast<std::size_t>(x) < log2_pack_size) + ...);
      }(pos...) != 0)
    return 1;

  // Recast to the right size: the state is processed pack_size amplitudes at a time
  auto *psi_re = reinterpret_cast<pack_type *>(psi_re_ptr);
  auto *psi_im = reinterpret_cast<pack_type *>(psi_im_ptr);
  const std::size_t state_size = state_size_ptr >> log2_pack_size;

  // Split in real and imaginary parts (U is stored interleaved re,im,re,im,...)
  static const std::size_t U_size = 1uL << n_pos;
  const auto U_re = subset<0, 2 * U_size * U_size, 2>(U_ptr);
  const auto U_im = subset<1, 2 * U_size * U_size, 2>(U_ptr);

  // Shift positions: indices are relative to packs, not single amplitudes
  const auto shift_pos = [](auto &&pos) {
    std::array<std::size_t, n_pos> shift_pos;
    for (std::size_t i = 0; i < n_pos; ++i)
      shift_pos[i] = pos[i] - log2_pack_size;
    return shift_pos;
  }(std::array{pos...});

  // Get zero
  static const auto _zero = __pack__<float_type, pack_size>::get(0);

#pragma omp parallel for
  for (std::size_t i = 0; i < (state_size >> n_pos); ++i) {
    // Get indexes to expand
    const auto _pos = expand(i, shift_pos);

    // Buffer real and imaginary parts from state
    const auto _psi_re = get(psi_re, _pos);
    const auto _psi_im = get(psi_im, _pos);

    // Compute matrix multiplication
    // NOTE(review): the inner `i` shadows the outer chunk index; the outer `i`
    // is not used inside this loop, but renaming would aid readability.
    for (std::size_t i = 0; i < U_size; ++i) {
      auto _re{_zero};
      auto _im{_zero};
      for (std::size_t j = 0; j < U_size; ++j) {
        const auto _U_re = U_re[i * U_size + j];
        const auto _U_im = U_im[i * U_size + j];
        // complex multiply-accumulate: (U_re + i U_im) * (psi_re + i psi_im)
        _re += _U_re * _psi_re[j] - _U_im * _psi_im[j];
        _im += _U_re * _psi_im[j] + _U_im * _psi_re[j];
      }
      psi_re[_pos[i]] = _re;
      psi_im[_pos[i]] = _im;
    }
  }

  return 0;
}

// Helper: unpack a fixed-size position array into the variadic overload above
// using an index sequence.
template <std::size_t log2_pack_size, typename float_type, typename Positions,
          std::size_t n_pos = array_size_v<Positions>, std::size_t... I>
int apply(float_type *psi_re_ptr, float_type *psi_im_ptr,
          const float_type *U_ptr, Positions &&pos,
          const std::size_t state_size_ptr, std::index_sequence<I...>) {
  return apply<log2_pack_size>(psi_re_ptr, psi_im_ptr, U_ptr, state_size_ptr,
                               pos[I]...);
}

// Convenience overload: positions given as an array-like object whose size is
// known at compile time.
template <std::size_t log2_pack_size, typename float_type, typename Positions,
          std::size_t n_pos = array_size_v<Positions>>
int apply(float_type *psi_re_ptr, float_type *psi_im_ptr,
          const float_type *U_ptr, Positions &&pos,
          const std::size_t state_size_ptr) {
  return apply<log2_pack_size>(psi_re_ptr, psi_im_ptr, U_ptr, pos,
                               state_size_ptr,
                               std::make_index_sequence<n_pos>{});
}

// Runtime-sized overload: positions and their count are only known at run time.
// Same contract as the variadic overload; returns 0 on success, 1 on invalid
// input.
template <std::size_t log2_pack_size, typename float_type, typename index_type>
int apply(float_type *psi_re_ptr, float_type *psi_im_ptr,
          const float_type *U_ptr, const index_type *pos,
          const std::size_t state_size_ptr, const std::size_t n_pos) {
  // Check if pointer are correctly aligned
  if (reinterpret_cast<std::size_t>(psi_re_ptr) % 32 or
      reinterpret_cast<std::size_t>(psi_im_ptr) % 32)
    return 1;

  // Get size of pack
  const std::size_t pack_size = 1uL << log2_pack_size;

  // Get U_Size
  const std::size_t U_size = 1uL << n_pos;

  // Get pack_type
  using pack_type = typename __pack__<float_type, pack_size>::value_type;

  // Check that all positions are positive numbers and not inside a pack
  for (std::size_t i = 0; i < n_pos; ++i)
    if (pos[i] < 0 or static_cast<std::size_t>(pos[i]) < log2_pack_size)
      return 1;

  // Recast to the right size
  auto *psi_re = reinterpret_cast<pack_type *>(psi_re_ptr);
  auto *psi_im = reinterpret_cast<pack_type *>(psi_im_ptr);
  const std::size_t state_size = state_size_ptr >> log2_pack_size;

  // Compute offset: how far each position shifts once smaller positions are folded out
  std::size_t offset[n_pos];
  for (std::size_t i = 0; i < n_pos; ++i) {
    offset[i] = log2_pack_size;
    for (std::size_t j = i + 1; j < n_pos; ++j) offset[i] += (pos[j] < pos[i]);
  }

  // Allocate buffers
  // NOTE(review): these are runtime-sized (VLA) arrays of a pack type — a GNU
  // extension in C++; confirm the supported compilers accept this.
  pack_type _psi_re[U_size];
  pack_type _psi_im[U_size];
  std::size_t _pos[U_size];

  // Generator of positions: insert the bits of j at the target positions of i
  auto _get_position = [&pos, &offset, n_pos](std::size_t i, std::size_t j) {
    std::size_t y{i};
    for (std::size_t i = 0; i < n_pos; ++i) {
      const std::size_t p = pos[i] - offset[i];
      const std::size_t y_mask = (1uL << p) - 1;
      y = ((y & ~y_mask) << 1) ^ (y & y_mask) ^ (((j >> i) & 1uL) << p);
    }
    return y;
  };

#pragma omp parallel for private(_psi_re, _psi_im, _pos)
  for (std::size_t i = 0; i < (state_size >> n_pos); ++i) {
    // Get positions
    for (std::size_t j = 0; j < U_size; ++j) _pos[j] = _get_position(i, j);

    // Load buffer
    for (std::size_t j = 0; j < U_size; ++j) {
      _psi_re[j] = psi_re[_pos[j]];
      _psi_im[j] = psi_im[_pos[j]];
    }

    // Compute matrix multiplication (U stored interleaved re/im)
    for (std::size_t i = 0; i < U_size; ++i) {
      auto _re = pack_type{0};
      auto _im = pack_type{0};
      for (std::size_t j = 0; j < U_size; ++j) {
        const auto _U_re = U_ptr[2 * i * U_size + 2 * j];
        const auto _U_im = U_ptr[2 * i * U_size + 2 * j + 1];
        _re += _U_re * _psi_re[j] - _U_im * _psi_im[j];
        _im += _U_re * _psi_im[j] + _U_im * _psi_re[j];
      }

      // Update state
      psi_re[_pos[i]] = _re;
      psi_im[_pos[i]] = _im;
    }
  }

  return 0;
}

}  // namespace hybridq::U

#endif
GB_unop__acos_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__acos_fc64_fc64)
// op(A') function:  GB (_unop_tran__acos_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = cacos (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cacos (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = cacos (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOS || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply over anz entries; safe when Cx aliases Ax because each
// entry is read and written at the same index.
GrB_Info GB (_unop_apply__acos_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cacos (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cacos (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__acos_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__ainv_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__ainv_fc64_fc64)
// op(A') function:  GB (_unop_tran__ainv_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_FC64_ainv (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_FC64_ainv (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = GB_FC64_ainv (z) ;    \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply over anz entries; safe when Cx aliases Ax because each
// entry is read and written at the same index.
GrB_Info GB (_unop_apply__ainv_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_FC64_ainv (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_FC64_ainv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__ainv_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
jacobi-ompacc-opt1.c
// Using target data to promote data allocation to higher level, enabling reusing in iterations #include <stdio.h> #include <math.h> #include <assert.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif // Add timing support #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t,(struct timezone*)NULL); time = t.tv_sec + 1.0e-6*t.tv_usec; return time; } double time1, time2; void driver(void); void initialize(void); void jacobi(void); void error_check(void); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve parallelism. * All do loops are parallelized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 512 int n,m,mits; #define REAL float // flexible between float and double REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08; // depending on MSIZE!! REAL tol,relax=1.0,alpha=0.0543; REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE]; REAL dx,dy; // value, reference value, and the number of significant digits to be ensured. 
/* Returns the relative difference |val - ref| / |ref| and asserts that val
 * agrees with ref to at least 'significant_digits' decimal digits.
 * NOTE(review): divides by |ref| — assumes ref is nonzero; confirm at call sites. */
double diff_ratio (double val, double ref, int significant_digits)
{
    assert (significant_digits>=1);
    double diff_ratio = fabs(val - ref )/fabs(ref);
    double upper_limit = pow (0.1, significant_digits); // 1.0/(double(10^significant_digits)) ;
    printf("value :%E ref_value: %E diff_ratio: %E upper_limit: %E \n",val, ref, diff_ratio, upper_limit);
    // ensure the number of the significant digits to be the same
    assert ( diff_ratio < upper_limit);
    return diff_ratio;
}

/* Entry point: fixes the problem size/tolerance (interactive input below is
 * disabled) and delegates to driver(). */
int main (void)
{
//  float toler;
    /*
    printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
    scanf ("%d",&n);
    scanf ("%d",&m);
    printf("Input tol - error tolerance for iterative solver\n");
    scanf("%f",&toler);
    tol=(double)toler;
    printf("Input mits - Maximum iterations for solver\n");
    scanf("%d",&mits);
    */
    // Fixed configuration: MSIZE x MSIZE grid, tight tolerance, 5000 max iterations.
    n=MSIZE;
    m=MSIZE;
    tol=0.0000000001;
    mits=5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
    {
#pragma omp single
        printf("Running using %d threads...\n",omp_get_num_threads());
    }
#endif
#endif
    driver ( ) ;
    return 0;
}

/*************************************************************
 * Subroutine driver ()
 * This is where the arrays are allocated and initialzed.
 *
 * Working varaibles/arrays
 * dx - grid spacing in x direction
 * dy - grid spacing in y direction
 *************************************************************/

/* Top-level driver: initializes the grid, times the Jacobi solve, and
 * checks the result against the exact solution. */
void driver( )
{
    initialize();

    time1 = time_stamp();
    /* Solve Helmholtz equation */
    jacobi ();
    time2 = time_stamp();

    printf("------------------------\n");
    printf("Execution time = %f\n",time2-time1);
    /* error_check (n,m,alpha,dx,dy,u,f)*/
    error_check ( );
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
 *
 ******************************************************/
void initialize( )
{
    int i,j, xx,yy;
    //double PI=3.1415926;

    dx = 2.0 / (n-1);
    dy = 2.0 / (m-1);

    /* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
    for (i=0;i<n;i++)
        for (j=0;j<m;j++)
        {
            // NOTE(review): xx/yy are declared int, so these coordinates are
            // truncated (they only take values near -1/0 on this grid). This
            // matches the widely-circulated benchmark source — presumably
            // intentional; confirm before "fixing", since the reference
            // values error_ref/resid_ref were produced with this behavior.
            xx =(int)( -1.0 + dx * (i-1));
            yy = (int)(-1.0 + dy * (j-1)) ;
            u[i][j] = 0.0;
            f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
                - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
        }
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
 ******************************************************************
 * Subroutine HelmholtzJ
 * Solves poisson equation on rectangular grid assuming :
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlect boundary conditions
 *
 * Jacobi method is used in this routine
 *
 * Input : n,m Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn.
coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol Tolerance for iterative solver
 *         maxit Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi( )
{
    REAL omega;
    int i,j,k;
    REAL error,resid,ax,ay,b;
    // double error_local;
    // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
    // float te1,te2;
    // float second;

    omega=relax;
    /*
     * Initialize coefficients */
    ax = 1.0/(dx*dx); /* X-direction coef */
    ay = 1.0/(dy*dy); /* Y-direction coef */
    b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

    error = 10.0 * tol; // seed > tol so the loop body runs at least once
    k = 1;

// An optimization on top of naive coding: promoting data handling outside the while loop
// data properties may change since the scope is bigger:
// NOTE(review): u is mapped tofrom once for the whole solve and uold is
// device-only (alloc); the inner target constructs re-map the same arrays,
// which for data already present in the device data environment reuses the
// existing allocation rather than re-transferring — confirm against the
// OpenMP map-clause semantics for the target compiler.
#pragma omp target data map(to:n, m, omega, ax, ay, b, f[0:n][0:m]) map(tofrom:u[0:n][0:m]) map(alloc:uold[0:n][0:m])
    while ((k<=mits)&&(error>tol))
    {
        error = 0.0;

        /* Copy new solution into old */
//#pragma omp parallel
//    {
#pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
        for(i=0;i<n;i++)
            for(j=0;j<m;j++)
                uold[i][j] = u[i][j];

        // Jacobi sweep over interior points; residual squared is reduced into 'error'.
#pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
        for (i=1;i<(n-1);i++)
            for (j=1;j<(m-1);j++)
            {
                resid = (ax*(uold[i-1][j] + uold[i+1][j])\
                    + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;

                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid*resid ;
            }
//    }
        /* omp end parallel */

        /* Error check */
        if (k%500==0)
            printf("Finished %d iteration with error =%f\n",k, error);
        // Convergence metric: RMS-style residual, compared against tol by the
        // while condition on the next pass.
        error = sqrt(error)/(n*m);

        k = k + 1;
    } /* End iteration loop */

    // k was incremented after the final iteration, so this prints
    // (iterations completed + 1).
    printf("Total Number of Iterations:%d\n",k);
    printf("Residual:%E\n", error);
    printf("Residual_ref :%E\n", resid_ref);
    printf ("Diff ref=%E\n", fabs(error-resid_ref));
    assert (fabs(error-resid_ref) < 1E-13);
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 implicit none
 ************************************************************
 * Checks error between numerical and exact solution
 * (exact solution is u(x,y) = (1-x^2)*(1-y^2); see initialize()).
 ************************************************************/
void error_check ( )
{
    int i,j;
    REAL xx,yy,temp,error;

    dx = 2.0 / (n-1);
    dy = 2.0 / (m-1);
    error = 0.0 ;

//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
    for (i=0;i<n;i++)
        for (j=0;j<m;j++)
        {
            xx = -1.0 + dx * (i-1);
            yy = -1.0 + dy * (j-1);
            temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
            error = error + temp*temp;
        }
    error = sqrt(error)/(n*m);
    printf("Solution Error :%E \n",error);
    printf("Solution Error Ref :%E \n",error_ref);
    printf ("Diff ref=%E\n", fabs(error-error_ref));
    assert (fabs(error-error_ref) < 1E-13);
}
runLengthMatrix.c
/* * Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com) * * Released under the MIT license, see LICENSE.txt */ #include <getopt.h> #include <stdio.h> #include <ctype.h> #include <memory.h> #include <hashTableC.h> #include <unistd.h> #include <time.h> #include "marginVersion.h" #include "margin.h" #include "htsIntegration.h" #include "helenFeatures.h" /* * Main functions */ void usage() { fprintf(stderr, "usage: runLengthMatrix <ALIGN_BAM> <REFERENCE_FASTA> <PARAMS> [options]\n"); fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H); fprintf(stderr, "Produces a run length matrix of reads in ALIGN_BAM to REFERENCE_FASTA.\n"); fprintf(stderr, "\nRequired arguments:\n"); fprintf(stderr, " ALIGN_BAM is the alignment of reads to the reference.\n"); fprintf(stderr, " REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n"); // fprintf(stderr, " VARIANT_VCF is the set of variants to use for phasing.\n"); fprintf(stderr, " PARAMS is the file with margin parameters.\n"); // fprintf(stderr, "\nDefault options:\n"); fprintf(stderr, " -h --help : Print this help screen\n"); fprintf(stderr, " -a --logLevel : Set the log level [default = info]\n"); # ifdef _OPENMP fprintf(stderr, " -t --threads : Set number of concurrent threads [default = 1]\n"); #endif fprintf(stderr, " -o --outputBase : Name to use for output files [default = 'output']\n"); fprintf(stderr, " -r --region : If set, will only compute for given chromosomal region\n"); fprintf(stderr, " Format: chr:start_pos-end_pos (chr3:2000-3000)\n"); fprintf(stderr, " -p --depth : Will override the downsampling depth set in PARAMS\n"); fprintf(stderr, " -l --maxRunLength : Maximum run length (default 50)\n"); fprintf(stderr, "\n"); } int64_t charToNuclIdx(char nucl, bool forward) { switch (nucl) { case 'a': case 'A': return forward ? 0 : 3; case 'c': case 'C': return forward ? 1 : 2; case 'g': case 'G': return forward ? 2 : 1; case 't': case 'T': return forward ? 
3 : 0; default: return -1; } } int64_t getRunLengthArrayIndex(int threadIdx, int64_t nuclIdx, uint64_t refRL, uint64_t readRL, int64_t maxRL) { // bad thread assert(threadIdx >= 0); assert(nuclIdx < 4); int64_t threadPos = threadIdx * 4 * maxRL * maxRL; int64_t nuclPos = nuclIdx * maxRL * maxRL; int64_t refRlPos = (refRL < maxRL ? refRL : maxRL - 1) * maxRL; int64_t readRlPos = (readRL < maxRL ? readRL : maxRL - 1); // bad nucl if (nuclPos < 0) return -1; return threadPos + nuclPos + refRlPos + readRlPos; } int64_t testRunLengthConstruction() { int64_t threadCount = 10; int64_t maxRunLenght = 10; int64_t nuclCount = 4; int64_t maxArraySize = threadCount * nuclCount * maxRunLenght * maxRunLenght; uint64_t *myArray = st_calloc(maxArraySize, sizeof(uint64_t)); for (int thread = 0 ; thread < threadCount; thread++) { for (uint64_t refRl = 0; refRl < maxRunLenght; refRl++) { for (uint64_t readRl = 0; readRl < maxRunLenght; readRl++) { for (int64_t nucl = 0; nucl < nuclCount; nucl++) { char nuc = (nucl==0 ? 'A' : (nucl==1 ? 'C' : (nucl==2 ? 
'G' : 'T'))); for (int64_t strand = 0; strand < 2; strand++) { int64_t idx = getRunLengthArrayIndex(thread, charToNuclIdx(nuc, strand == 0), refRl, readRl, maxRunLenght); assert(idx < maxArraySize); myArray[idx] += 1; } } } } } for (int64_t i = 0; i < maxArraySize; i++) { assert(myArray[i] == 2); } free(myArray); } int main(int argc, char *argv[]) { // Parameters / arguments char *logLevelString = stString_copy("critical"); char *bamInFile = NULL; char *referenceFastaFile = NULL; char *paramsFile = NULL; char *outputBase = stString_copy("output"); char *regionStr = NULL; int numThreads = 1; int64_t maxDepth = -1; int64_t maxRunLengthExcl = 51; if (argc < 3) { free(outputBase); free(logLevelString); usage(); return 0; } bamInFile = stString_copy(argv[1]); referenceFastaFile = stString_copy(argv[2]); paramsFile = stString_copy(argv[3]); // Parse the options while (1) { static struct option long_options[] = { { "help", no_argument, 0, 'h' }, { "logLevel", required_argument, 0, 'a' }, # ifdef _OPENMP { "threads", required_argument, 0, 't'}, #endif { "outputBase", required_argument, 0, 'o'}, { "region", required_argument, 0, 'r'}, { "depth", required_argument, 0, 'p'}, { "tempFilesToDisk", no_argument, 0, 'k'}, { "maxRunLength", no_argument, 0, 'l'}, { 0, 0, 0, 0 } }; int option_index = 0; int key = getopt_long(argc-2, &argv[2], "ha:o:p:t:r:l:", long_options, &option_index); if (key == -1) { break; } switch (key) { case 'a': free(logLevelString); logLevelString = stString_copy(optarg); break; case 'h': usage(); return 0; case 'o': free(outputBase); outputBase = getFileBase(optarg, "output"); break; case 'r': regionStr = stString_copy(optarg); break; case 't': numThreads = atoi(optarg); if (numThreads <= 0) { st_errAbort("Invalid thread count: %d", numThreads); } break; case 'l': maxRunLengthExcl = atoi(optarg) +1; if (maxRunLengthExcl < 1) { st_errAbort("Invalid max run length: %s", optarg); } break; default: usage(); free(outputBase); free(logLevelString); 
free(bamInFile); free(referenceFastaFile); free(paramsFile); return 0; } } // sanity check (verify files exist) if (access(bamInFile, R_OK) != 0) { st_errAbort("Could not read from input bam file: %s\n", bamInFile); char *idx = stString_print("%s.bai", bamInFile); if (access(idx, R_OK) != 0) { st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile); } free(idx); } if (access(referenceFastaFile, R_OK) != 0) { st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile); } if (access(paramsFile, R_OK) != 0) { st_errAbort("Could not read from params file: %s\n", paramsFile); } // Initialization from arguments time_t startTime = time(NULL); st_setLogLevelFromString(logLevelString); free(logLevelString); if (st_getLogLevel() >= info) { st_setCallocDebug(true); } # ifdef _OPENMP if (numThreads <= 0) { numThreads = 1; } omp_set_num_threads(numThreads); st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads()); # endif //testing testRunLengthConstruction(); // Parse parameters st_logCritical("> Parsing model parameters from file: %s\n", paramsFile); Params *params = params_readParams(paramsFile); // parameter updates st_logInfo(" Setting chunkBoundary to 0\n"); params->polishParams->chunkBoundary = 0; // update depth (if set) if (maxDepth >= 0) { st_logCritical("> Changing maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth, maxDepth); params->polishParams->maxDepth = (uint64_t) maxDepth; } // Print a report of the parsed parameters if (st_getLogLevel() == debug) { params_printParameters(params, stderr); } // get chunker for bam. 
if regionStr is NULL, it will be ignored time_t chunkingStart = time(NULL); BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, NULL, params->polishParams, TRUE); st_logCritical( "> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n", time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary, regionStr == NULL ? "all" : regionStr, bamChunker->chunkCount); if (bamChunker->chunkCount == 0) { st_errAbort("> Found no valid reads!\n"); } // (may) need to shuffle chunks stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct); for (int64_t i = 0; i < bamChunker->chunkCount; i++) { stList_append(chunkOrder, stIntTuple_construct1(i)); } if (params->polishParams->shuffleChunks) { switch (params->polishParams->shuffleChunksMethod) { case SCM_SIZE_DESC: st_logCritical("> Ordering chunks by estimated depth\n"); stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks); stList_reverse(chunkOrder); break; case SCM_RANDOM: st_logCritical("> Randomly shuffling chunks\n"); stList_shuffle(chunkOrder); break; } } // this is the run length data we want int64_t totalSize = numThreads * 4 * maxRunLengthExcl * maxRunLengthExcl; uint64_t *runLengthDataForAllThreads = st_calloc(totalSize, sizeof(uint64_t)); // multiproccess the chunks, save to results st_logCritical("> Setup complete, beginning run\n"); int64_t lastReportedPercentage = 0; time_t polishStartTime = time(NULL); # ifdef _OPENMP #pragma omp parallel for schedule(dynamic,1) # endif for (int64_t i = 0; i < bamChunker->chunkCount; i++) { int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0); // Time all chunks time_t chunkStartTime = time(NULL); // Get chunk BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx); // logging char *logIdentifier; bool logProgress = FALSE; int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount); # ifdef 
_OPENMP int64_t threadIdx = omp_get_thread_num(); logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx); if (threadIdx == 0) { if (currentPercentage != lastReportedPercentage) { logProgress = TRUE; lastReportedPercentage = currentPercentage; } } # else int64_t threadIdx = 0; logIdentifier = stString_copy(""); if (currentPercentage != lastReportedPercentage) { logProgress = TRUE; lastReportedPercentage = currentPercentage; } # endif // prints percentage complete and estimated time remaining if (logProgress) { // log progress int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime); int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage)); char *timeDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ? stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining)); st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64"). Estimated time remaining: %s\n", currentPercentage, i, bamChunker->chunkCount, timeDescriptor); free(timeDescriptor); } RleString *rleReference = bamChunk_getReferenceSubstring(bamChunk, referenceFastaFile, params); st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n", logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd); // Convert bam lines into corresponding reads and alignments stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct); stList *alignments = stList_construct3(0, (void (*)(void *)) stList_destruct); stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct); stList *filteredAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct); convertToReadsAndAlignments(bamChunk, rleReference, reads, alignments, params->polishParams); // do downsampling if appropriate if (params->polishParams->maxDepth > 0) { // get downsampling structures stList *maintainedReads = 
stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct); stList *maintainedAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct); bool didDownsample = downsampleViaReadLikelihood(params->polishParams->maxDepth, bamChunk, reads, alignments, maintainedReads, maintainedAlignments, filteredReads, filteredAlignments); // we need to destroy the discarded reads and structures if (didDownsample) { st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier, stList_length(reads), stList_length(maintainedReads)); // still has all the old reads, need to not free these stList_setDestructor(reads, NULL); stList_setDestructor(alignments, NULL); stList_destruct(reads); stList_destruct(alignments); // and keep the filtered reads reads = maintainedReads; alignments = maintainedAlignments; } // no downsampling, we just need to free the (empty) objects else { assert(stList_length(maintainedReads) == 0); assert(stList_length(maintainedAlignments) == 0); stList_destruct(maintainedReads); stList_destruct(maintainedAlignments); } } // prep for polishing Poa *poa = NULL; // The poa alignment // Generate partial order alignment (POA) (destroys rleAlignments in the process) poa = poa_realignOnlyAnchorAlignments(reads, alignments, rleReference, params->polishParams); for (int64_t pos = 1; pos < stList_length(poa->nodes); pos++) { PoaNode *node = stList_get(poa->nodes, pos); char refNucl = node->base; uint64_t refRL = node->repeatCount; for (int64_t o = 0; o < stList_length(node->observations); o++) { PoaBaseObservation *obs = stList_get(node->observations, o); BamChunkRead *read = stList_get(reads, obs->readNo); char readNucl = read->rleRead->rleString[obs->offset]; uint64_t readRL = read->rleRead->repeatCounts[obs->offset]; if (readNucl == refNucl) { int64_t idx = getRunLengthArrayIndex(threadIdx, charToNuclIdx(readNucl, read->forwardStrand), refRL, readRL, maxRunLengthExcl); if (idx < 0) { continue; } assert(idx < totalSize); 
runLengthDataForAllThreads[idx] += 1; } } } // report timing if (st_getLogLevel() >= info) { st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n", logIdentifier, stList_length(reads), (int) (time(NULL) - chunkStartTime)); } // final post-completion logging cleanup poa_destruct(poa); rleString_destruct(rleReference); stList_destruct(reads); stList_destruct(alignments); stList_destruct(filteredReads); stList_destruct(filteredAlignments); free(logIdentifier); } st_logCritical("> Consolidating all run lengths\n"); // condense all values uint64_t *condensedRunLengthArray = st_calloc(4 * maxRunLengthExcl * maxRunLengthExcl, sizeof(uint64_t)); for (int t = 0; t < numThreads; t++) { for (int64_t nucl = 0; nucl < 4; nucl++) { for (uint64_t refRL = 1; refRL < maxRunLengthExcl; refRL++) { for (uint64_t readRL = 1; readRL < maxRunLengthExcl; readRL++) { int64_t fullDataPos = getRunLengthArrayIndex(t, nucl, refRL, readRL, maxRunLengthExcl); int64_t condensedPos = getRunLengthArrayIndex(0, nucl, refRL, readRL, maxRunLengthExcl); assert(fullDataPos >= 0); assert(condensedPos >= 0); condensedRunLengthArray[condensedPos] += runLengthDataForAllThreads[fullDataPos]; } } } } // printit char *countFilenameA = stString_print("%s.run_lengths.A.tsv", outputBase); FILE *countFileA = fopen(countFilenameA, "w"); char *countFilenameC = stString_print("%s.run_lengths.C.tsv", outputBase); FILE *countFileC = fopen(countFilenameC, "w"); char *countFilenameG = stString_print("%s.run_lengths.G.tsv", outputBase); FILE *countFileG = fopen(countFilenameG, "w"); char *countFilenameT = stString_print("%s.run_lengths.T.tsv", outputBase); FILE *countFileT = fopen(countFilenameT, "w"); if (countFileA == NULL || countFileC == NULL || countFileG == NULL || countFileT == NULL) { st_errAbort("Could not open output files for writing!", countFilenameA); } else { st_logCritical("> Writing counts to %s, %s, %s %s\n", countFilenameA, countFilenameC, countFilenameG, countFilenameT); } for (uint64_t 
refRL = 0; refRL < maxRunLengthExcl; refRL++) { for (uint64_t readRL = 0; readRL < maxRunLengthExcl; readRL++) { if (refRL == 0) { // header if (readRL == 0) { fprintf(countFileA, "#ref_rl"); fprintf(countFileC, "#ref_rl"); fprintf(countFileG, "#ref_rl"); fprintf(countFileT, "#ref_rl"); } else { fprintf(countFileA, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); fprintf(countFileC, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); fprintf(countFileG, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); fprintf(countFileT, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); } } else { if (readRL == 0) { // header (ish) fprintf(countFileA, "%"PRIu64, refRL); fprintf(countFileC, "%"PRIu64, refRL); fprintf(countFileG, "%"PRIu64, refRL); fprintf(countFileT, "%"PRIu64, refRL); } else { // data int64_t condensedPosA = getRunLengthArrayIndex(0, charToNuclIdx('A', TRUE), refRL, readRL, maxRunLengthExcl); int64_t condensedPosC = getRunLengthArrayIndex(0, charToNuclIdx('C', TRUE), refRL, readRL, maxRunLengthExcl); int64_t condensedPosG = getRunLengthArrayIndex(0, charToNuclIdx('G', TRUE), refRL, readRL, maxRunLengthExcl); int64_t condensedPosT = getRunLengthArrayIndex(0, charToNuclIdx('T', TRUE), refRL, readRL, maxRunLengthExcl); uint64_t countA = condensedRunLengthArray[condensedPosA]; uint64_t countC = condensedRunLengthArray[condensedPosC]; uint64_t countG = condensedRunLengthArray[condensedPosG]; uint64_t countT = condensedRunLengthArray[condensedPosT]; fprintf(countFileA, "%"PRIu64, countA); fprintf(countFileC, "%"PRIu64, countC); fprintf(countFileG, "%"PRIu64, countG); fprintf(countFileT, "%"PRIu64, countT); } } // increment if (readRL == maxRunLengthExcl - 1) { fprintf(countFileA, "\n"); fprintf(countFileC, "\n"); fprintf(countFileG, "\n"); fprintf(countFileT, "\n"); } else { fprintf(countFileA, "\t"); fprintf(countFileC, "\t"); fprintf(countFileG, "\t"); fprintf(countFileT, "\t"); } } 
} // close files fclose(countFileA); fclose(countFileC); fclose(countFileG); fclose(countFileT); // cleanup free(countFilenameA); free(countFilenameC); free(countFilenameG); free(countFilenameT); free(condensedRunLengthArray); free(runLengthDataForAllThreads); bamChunker_destruct(bamChunker); params_destruct(params); if (regionStr != NULL) free(regionStr); stList_destruct(chunkOrder); free(outputBase); free(bamInFile); free(referenceFastaFile); free(paramsFile); // log completion char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime); st_logCritical("> Finished generating run length matrix in %s.\n", timeDescriptor); free(timeDescriptor); // while(1); // Use this for testing for memory leaks return 0; }
parallel_ublas_space.h
/* ============================================================================== Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: rrossi $ // Date: $Date: 2008-11-10 14:23:33 $ // Revision: $Revision: 1.5 $ // // #if !defined(KRATOS_PARALLEL_UBLAS_SPACE_H_INCLUDED ) #define KRATOS_PARALLEL_UBLAS_SPACE_H_INCLUDED // System includes #include <string> #include <iostream> #include <cstddef> #include "omptl" #include "omptl_algorithm" #include "omptl_numeric" // External includes // Project includes #include "includes/define.h" #include "includes/ublas_interface.h" namespace Kratos { // The function object multiplies an element by a Factor template <class Type> class MultValue { private: Type Factor; // The value to multiply by public: // Constructor initializes the value to multiply by MultValue ( const Type& _Val ) : Factor ( _Val ) { } // The function call for the element to be multiplied Type operator ( ) ( Type& elem ) const { return elem * Factor; } }; ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ template<class TDataType, class TMatrixType, class TVectorType> class ParallelUblasSpace { public: ///@name Type Definitions ///@{ /// Pointer definition of ParallelUblasSpace KRATOS_CLASS_POINTER_DEFINITION(ParallelUblasSpace); typedef TDataType DataType; typedef TMatrixType MatrixType; typedef TVectorType VectorType; typedef std::size_t IndexType; typedef std::size_t SizeType; typedef typename boost::shared_ptr< TMatrixType > MatrixPointerType; typedef typename boost::shared_ptr< TVectorType > VectorPointerType; ///@} ///@name Life Cycle ///@{ /// Default constructor. ParallelUblasSpace(){} /// Destructor. 
virtual ~ParallelUblasSpace(){} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ static MatrixPointerType CreateEmptyMatrixPointer(){return MatrixPointerType( new TMatrixType() ); } static VectorPointerType CreateEmptyVectorPointer(){return VectorPointerType( new TVectorType() ); } /// return size of vector rV static IndexType Size(VectorType const& rV){return rV.size();} /// return number of rows of rM static IndexType Size1(MatrixType const& rM){return rM.size1();} /// return number of columns of rM static IndexType Size2(MatrixType const& rM){return rM.size2();} /// rXi = rMij static void GetColumn(unsigned int j, MatrixType& rM, VectorType& rX){rX = column(rM, j);} /// rMij = rXi static void SetColumn(unsigned int j, MatrixType& rM, VectorType& rX){rX = row(rM, j);} /// rY = rX static void Copy(MatrixType const& rX, MatrixType& rY) { // :TODO: Parallelize rY.assign(rX); // omptl::copy( rX.begin(), rX.end(), rY.begin() ); } /// rY = rX static void Copy(VectorType const& rX, VectorType& rY) { // rY.assign(rX); omptl::copy( rX.begin(), rX.end(), rY.begin() ); } /// rX * rY static TDataType Dot(VectorType const& rX, VectorType const& rY) { vector<unsigned int> partition; int number_of_threads = omp_get_max_threads(); CreatePartition(number_of_threads, rX.size(), partition); vector< TDataType > partial_results(number_of_threads); int i; #pragma omp parallel for default(shared) private(i) for(i = 0; i<number_of_threads; i++) { partial_results[i] = std::inner_product( rX.data().begin()+partition[i], rX.data().begin()+partition[i+1], rY.data().begin()+partition[i], TDataType() ); } double total = TDataType(); for(int i = 0; i<number_of_threads; i++) total += partial_results[i]; // return inner_prod(rX, rY); return total; } /// ||rX||2 static double TwoNorm(VectorType const& rX) { return sqrt( Dot( rX, rX ) ); // return norm_2(rX); } static void Mult(MatrixType& rA, VectorType& rX, VectorType& rY) { ParallelProductNoAdd( rA, rX, rY ); // axpy_prod(rA, rX, 
rY, true); }// rY = rA * rX static void TransposeMult(MatrixType& rA, VectorType& rX, VectorType& rY) { // :TODO: Parallelize axpy_prod(rX, rA, rY, true); }// rY = rAT * rX static inline SizeType GraphDegree( IndexType i, TMatrixType& A) { typename MatrixType::iterator1 a_iterator = A.begin1(); std::advance(a_iterator,i); #ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION return( std::distance( a_iterator.begin(), a_iterator.end() ) ); #else return( std::distance( begin(a_iterator, boost::numeric::ublas::iterator1_tag()), end(a_iterator, boost::numeric::ublas::iterator1_tag()) ) ); #endif } static inline void GraphNeighbors( IndexType i, TMatrixType& A, std::vector<IndexType>& neighbors) { neighbors.clear(); typename MatrixType::iterator1 a_iterator = A.begin1(); std::advance(a_iterator,i); #ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION for (typename MatrixType::iterator2 row_iterator = a_iterator.begin() ; row_iterator != a_iterator.end() ; ++row_iterator) { #else for ( typename MatrixType::iterator2 row_iterator = begin(a_iterator, boost::numeric::ublas::iterator1_tag()); row_iterator != end(a_iterator, boost::numeric::ublas::iterator1_tag()); ++row_iterator ) { #endif neighbors.push_back( row_iterator.index2() ); } } //******************************************************************** //checks if a multiplication is needed and tries to do otherwise static void InplaceMult(VectorType& rX, const double A) { if( A == 1.00) {} else if( A == -1.00) { omptl::transform( rX.begin(), rX.end(), rX.begin(), std::negate<double>() ); //rX *= A; // typename VectorType::iterator x_iterator = rX.begin(); // typename VectorType::iterator end_iterator = rX.end(); // while(x_iterator != end_iterator) // { // *x_iterator = -*x_iterator; // x_iterator++; // // } } else { // rX *= A; omptl::transform( rX.begin(), rX.end(), rX.begin(), MultValue<double>( A ) ); } } //******************************************************************** //checks if a multiplication is needed and tries to do 
otherwise //ATTENTION it is assumed no aliasing between rX and rY // X = A*y; static void Assign(VectorType& rX, const double A, const VectorType& rY) { if( A == 1.00) omptl::copy( rY.begin(), rY.end(), rX.begin() ); // noalias(rX) = rY; else if( A == -1.00) { omptl::transform( rY.begin(), rY.end(), rX.begin(), std::negate<double>() ); } // noalias(rX) = -rY; else { omptl::transform( rY.begin(), rY.end(), rX.begin(), MultValue<double>( A ) ); //TODO .. parallelize // noalias(rX) = A*rY; } } //******************************************************************** //checks if a multiplication is needed and tries to do otherwise //ATTENTION it is assumed no aliasing between rX and rY // X += A*y; static void UnaliasedAdd(VectorType& rX, const double A, const VectorType& rY) { if( A == 1.00) { // noalias(rX) += rY; omptl::transform( rY.data().begin(), rY.data().end(), rX.data().begin(), rX.data().begin(), std::plus<double>() ); } else if( A == -1.00) { noalias(rX) -= rY; omptl::transform( rY.data().begin(), rY.data().end(), rX.data().begin(), rX.data().begin(), std::minus<double>() ); // omptl::transform( rY.data().begin(), rY.data().end(), rX.data().begin(), std::minus<double>() ); } else { //TODO: parallelize!!! 
noalias(rX) += A*rY; } } //******************************************************************** static void ScaleAndAdd(const double A, const VectorType& rX, const double B, const VectorType& rY, VectorType& rZ) // rZ = (A * rX) + (B * rY) { Assign(rZ,A,rX); //rZ = A*rX UnaliasedAdd(rZ,B,rY); //rZ += B*rY //KRATOS_WATCH(rZ); //typename VectorType::const_iterator x_iterator = rX.begin(); //typename VectorType::const_iterator y_iterator = rY.begin(); //typename VectorType::iterator z_iterator = rZ.begin(); //typename VectorType::const_iterator end_iterator = rX.end(); //while(x_iterator != end_iterator) // *z_iterator++ = (A * *x_iterator++) + (B * *y_iterator++); } static void ScaleAndAdd(const double A,const VectorType& rX, const double B, VectorType& rY) // rY = (A * rX) + (B * rY) { InplaceMult(rY,B); UnaliasedAdd(rY,A,rX); //KRATOS_WATCH(rY); //typename VectorType::const_iterator x_iterator = rX.begin(); //typename VectorType::iterator y_iterator = rY.begin(); //typename VectorType::const_iterator end_iterator = rX.end(); // //double c = B - double(1.00); //while(x_iterator != end_iterator) // { // *y_iterator += (A * *x_iterator++) + (c * *y_iterator); // y_iterator++; // } } /// rA[i] * rX //will be most probably faster in serial as the rows are short static double RowDot(unsigned int i, MatrixType& rA, VectorType& rX) { // return omptl::inner_product( row(rA, i).begin(), row(rA, i).end(), rX.begin() ); return inner_prod(row(rA, i), rX); } /// rX = A static void Set(VectorType& rX, TDataType A) { //std::fill(rX.begin(), rX.end(), A); omptl::fill(rX.begin(), rX.end(), A ); } static void Resize(MatrixType& rA, SizeType m, SizeType n){rA.resize(m,n,false);} static void Resize(VectorType& rX, SizeType n) {rX.resize(n,false);} static void Clear(MatrixType& rA) {rA.clear();} static void Clear(VectorType& rX) {rX.clear();} template<class TOtherMatrixType> inline static void ClearData(TOtherMatrixType& rA){rA.clear();} inline static void 
ClearData(compressed_matrix<TDataType>& rA)
    {
        //rA.clear();
        // Zero only the stored (non-zero) values in parallel; the sparsity
        // pattern (index arrays) is deliberately preserved.
        omptl::fill( rA.value_data().begin(), rA.value_data().end(), 0.0 );
    }

    // Releasing a vector: swap-with-empty idiom frees the allocation.
    inline static void ClearData(VectorType& rX)
    {
        rX = VectorType();
    }

    // template<class TOtherMatrixType>
    // inline static void ResizeData(TOtherMatrixType& rA, SizeType m){rA.resize(m,false);
    // std::fill(rA.begin(), rA.end(), TDataType());}

    // Resize the stored-value array of a sparse matrix and zero it (parallel fill).
    inline static void ResizeData(compressed_matrix<TDataType>& rA, SizeType m)
    {
        rA.value_data().resize(m);
        omptl::fill(rA.value_data().begin(), rA.value_data().end(), TDataType());
    }

    // Resize a vector and zero it (parallel fill).
    inline static void ResizeData(VectorType& rX, SizeType m)
    {
        rX.resize(m);
        omptl::fill(rX.begin(), rX.end(), TDataType());
    }

    // NOTE(review): this generic overload uses a *serial* std::fill while the
    // specialized overloads below use the parallel omptl::fill — confirm
    // whether that asymmetry is intentional.
    template<class TOtherMatrixType>
    inline static void SetToZero(TOtherMatrixType& rA)
    {
        std::fill(rA.begin(), rA.end(), TDataType());
    }

    // Zero the stored values of a sparse matrix, keeping its pattern.
    inline static void SetToZero(compressed_matrix<TDataType>& rA)
    {
        KRATOS_TRY
        omptl::fill(rA.value_data().begin(), rA.value_data().end(), TDataType());
        /*            typedef unsigned int size_type;
                    typedef double  value_type;
                    size_type begin = rA.index1_data () [0];
                    size_type end = rA.index1_data () [rA.size1()];
                    for (size_type j = begin; j < end; ++ j)
                    {
                        rA.value_data()[j] = TDataType();
                    }*/
        KRATOS_CATCH("");
    }

    inline static void SetToZero(VectorType& rX)
    {
        omptl::fill(rX.begin(), rX.end(), TDataType());
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return "ParallelUblasSpace";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "ParallelUblasSpace";
    }

    /// Print object's data.
virtual void PrintData(std::ostream& rOStream) const { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ //y += A*x in parallel static void ParallelProductNoAdd( const MatrixType& A, const VectorType& in, VectorType& out) { //std::cout << "in function ParallelProductNoAdd" << std::endl; typedef unsigned int size_type; typedef double value_type; //create partition vector<size_type> partition; int number_of_threads = omp_get_max_threads(); CreatePartition(number_of_threads, A.size1(), partition); //parallel loop size_type processor_row_begin, processor_row_end; int proc_id = 0; #pragma omp parallel { int thread_id = omp_get_thread_num(); int number_of_rows = partition[thread_id+1] - partition[thread_id]; typename MatrixType::index_array_type::const_iterator row_iter_begin = A.index1_data().begin()+partition[thread_id]; typename MatrixType::index_array_type::const_iterator index_2_begin = A.index2_data().begin()+*row_iter_begin; typename MatrixType::value_array_type::const_iterator value_begin = A.value_data().begin()+*row_iter_begin; typename VectorType::iterator output_vec_begin = out.begin()+partition[thread_id]; partial_product_no_add( number_of_rows, row_iter_begin, index_2_begin, value_begin, in, output_vec_begin ); } } static void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; 
for(int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } /** * calculates partial product resetting to Zero the output before */ static void partial_product_no_add( int number_of_rows, typename TMatrixType::index_array_type::const_iterator row_begin, typename TMatrixType::index_array_type::const_iterator index2_begin, typename TMatrixType::value_array_type::const_iterator value_begin, const VectorType& input_vec, typename VectorType::iterator output_vec_begin ) { int row_size; typename MatrixType::index_array_type::const_iterator row_it = row_begin; for(int k = 0; k < number_of_rows; k++) { row_size= *(row_it+1)-*row_it; row_it++; TDataType t = TDataType(); for(int i = 0; i<row_size; i++) t += *value_begin++ * ( input_vec[*index2_begin++]); *output_vec_begin++ = t; } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. ParallelUblasSpace& operator=(ParallelUblasSpace const& rOther); /// Copy constructor. ParallelUblasSpace(ParallelUblasSpace const& rOther); ///@} }; // Class ParallelUblasSpace ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function // inline std::istream& operator >> (std::istream& rIStream, // ParallelUblasSpace& rThis); // /// output stream function // inline std::ostream& operator << (std::ostream& rOStream, // const ParallelUblasSpace& rThis) // { // rThis.PrintInfo(rOStream); // rOStream << std::endl; // rThis.PrintData(rOStream); // return rOStream; // } ///@} } // namespace Kratos. #endif // KRATOS_PARALLEL_UBLAS_SPACE_H_INCLUDED defined
openaccrt.h
#ifndef __OPENARC_HEADER__ #define __OPENARC_HEADER__ #include <cstring> #include <map> #include <vector> #include <set> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <string> #include <unistd.h> //Comment out below to disable pthread-based thread safety. #define _THREAD_SAFETY #ifdef _OPENMP #include <omp.h> //#ifndef _THREAD_SAFETY //#define _THREAD_SAFETY //#endif #endif #ifdef _THREAD_SAFETY #include <pthread.h> #endif #define DEFAULT_QUEUE -2 #define DEFAULT_ASYNC_QUEUE -1 #define DEVICE_NUM_UNDEFINED -1 #define MAX_NUM_QUEUES_PER_THREAD 1048576 #define NO_THREAD_ID -1 //VICTIM_CACHE_MODE = 0 // - Victim cache is not used //VICTIM_CACHE_MODE = 1 // - Victim cache stores freed device memory // - The freed device memory can be reused if the size matches. // - More applicable, but host memory should be pinned again. //VICTIM_CACHE_MODE = 2 // - Victim cache stores both pinned host memory and corresponding device memory. // - The pinned host memory and device memory are reused only for the same // host pointer; saving both memory pinning cost and device memory allocation cost // but less applicable. // - Too much prepinning can cause slowdown or crash of the program. // - If OPENARCRT_PREPINHOSTMEM is set to 0, reuse only device memory. // //For OpenCL devices, VICTIM_CACHE_MODE = 1 is always applied. #define VICTIM_CACHE_MODE 1 //PRESENT_TABLE_SEARCH_MODE = 0 // - Assume that the elements in the container follow a strict order at all times // - Use a linear search if the table does not have a key entry matching the input key. // - base pointer search time: O(logn) // - intermediate pointer search time: O(n) //PRESENT_TABLE_SEARCH_MODE = 1 // - Assume that the elements in the container follow a strict order at all times // - Exploit the strict ordering if the table does not have a key entry matching the input key. 
// - base pointer search time: O(logn) // - intermediate pointer search time: O(logn) #define PRESENT_TABLE_SEARCH_MODE 1 #ifdef _THREAD_SAFETY extern pthread_mutex_t mutex_HI_init; extern pthread_mutex_t mutex_HI_hostinit; extern pthread_mutex_t mutex_HI_kernelnames; extern pthread_mutex_t mutex_pin_host_memory; extern pthread_mutex_t mutex_victim_cache; extern pthread_mutex_t mutex_tempMalloc; extern pthread_mutex_t mutex_set_async; extern pthread_mutex_t mutex_set_device_num; extern pthread_mutex_t mutex_clContext; #endif typedef enum { HI_success = 0, HI_error = 1 } HI_error_t; typedef enum { HI_MemcpyHostToHost = 0, HI_MemcpyHostToDevice = 1, HI_MemcpyDeviceToHost = 2, HI_MemcpyDeviceToDevice = 3 } HI_MemcpyKind_t; typedef enum { HI_MEM_READ_WRITE = 1, HI_MEM_READ_ONLY = 2, HI_MEM_WRITE_ONLY = 4, } HI_MallocKind_t; typedef enum { HI_notstale = 0, HI_stale = 1, HI_maystale = 2 } HI_memstatus_t; typedef enum { HI_int = 0, HI_float = 1, } HI_datatype_t; typedef struct _HI_device_mem_handle_t { void* basePtr; size_t offset; } HI_device_mem_handle_t; typedef struct _addresstable_entity_t { void* basePtr; size_t size; _addresstable_entity_t(void* _basePtr, size_t _size) : basePtr(_basePtr), size(_size) {} } addresstable_entity_t; typedef std::map<const void *, void *> addressmap_t; typedef std::map<int, addressmap_t *> addresstable_t; typedef std::multimap<int, const void *> asyncfreetable_t; typedef std::map<int, asyncfreetable_t *> asyncfreetablemap_t; typedef std::multimap<int, void **> asynctempfreetable_t; typedef std::map<int, asynctempfreetable_t *> asynctempfreetablemap_t; typedef std::multimap<int, acc_device_t> asynctempfreetable2_t; typedef std::map<int, asynctempfreetable2_t *> asynctempfreetablemap2_t; typedef std::set<const void *> pointerset_t; typedef std::map<int, addresstable_t *> addresstablemap_t; typedef std::multimap<size_t, void *> memPool_t; typedef std::map<const void *, size_t> sizemap_t; typedef std::map<int, memPool_t *> memPoolmap_t; 
typedef std::map<int, sizemap_t *> memPoolSizemap_t; typedef std::map<void *, int> countermap_t; typedef std::map<int, addressmap_t *> asyncphostmap_t; typedef std::map<int, sizemap_t *> asynchostsizemap_t; typedef std::map<const void *, HI_memstatus_t> memstatusmap_t; #ifdef _OPENARC_PROFILE_ typedef std::map<int, long> presenttablecnt_t; #endif extern int HI_openarcrt_verbosity; extern int HI_hostinit_done; extern int HI_num_hostthreads; //[CAUTION] For each device, there exists only one Accelerator object, //and thus accessing Accelerator members are not thread-safe. typedef class Accelerator { public: // Device info acc_device_t dev; int device_num; int num_devices; int init_done; int unifiedMemSupported; int compute_capability_major; int compute_capability_minor; int maxGridX, maxGridY, maxGridZ; int maxBlockX, maxBlockY, maxBlockZ; int maxNumThreadsPerBlock; int max1DTexRefWidth4LM; //kernel names that will be offloaded to this device. std::set<std::string> kernelNameSet; //Output kernel file name base. (Default value: "openarc_kernel") std::string fileNameBase; //Host-device address mapping table, augmented with stream id //addresstable_t masterAddressTable; addresstablemap_t masterAddressTableMap; //device-address-to-memory-handle mapping table, which is needed //for OpenCL backend only. //Current implementation uses a fake device virtual address for OpenCL, //which should be translated to actual cl_mem handle. addresstable_t masterHandleTable; //Auxiliary Host-device address mapping table used as a victim cache. addresstable_t auxAddressTable; //temporarily allocated memory set. pointerset_t tempMallocSet; //Host-TempHost address mapping table, augmented with stream id addresstable_t tempHostAddressTable; //This table can have duplicate entries, owing to the HI_free_async //calls in a loop. 
To handle this, HI_free ensures that on a duplicate //pair, no free operation is performed //asyncfreetable_t postponedFreeTable; asyncfreetablemap_t postponedFreeTableMap; asynctempfreetablemap_t postponedTempFreeTableMap; asynctempfreetablemap2_t postponedTempFreeTableMap2; //memPool_t memPool; memPoolmap_t memPoolMap; memPoolSizemap_t tempMallocSizeMap; #ifdef _OPENARC_PROFILE_ presenttablecnt_t presentTableCntMap; #endif virtual ~Accelerator() {}; // Kernel Initialization virtual HI_error_t init(int threadID=NO_THREAD_ID) = 0; virtual HI_error_t destroy(int threadID=NO_THREAD_ID)=0; // Kernel Execution virtual HI_error_t HI_register_kernels(std::set<std::string>kernelNames, int threadID=NO_THREAD_ID) = 0; virtual HI_error_t HI_register_kernel_numargs(std::string kernel_name, int num_args, int threadID=NO_THREAD_ID) = 0; virtual HI_error_t HI_register_kernel_arg(std::string kernel_name, int arg_index, size_t arg_size, void *arg_value, int arg_type, int threadID=NO_THREAD_ID) = 0; virtual HI_error_t HI_kernel_call(std::string kernel_name, size_t gridSize[3], size_t blockSize[3], int async=DEFAULT_QUEUE, int num_waits=0, int *waits=NULL, int threadID=NO_THREAD_ID) = 0; virtual HI_error_t HI_synchronize( int forcedSync = 0, int threadID=NO_THREAD_ID )=0; void updateKernelNameSet(std::set<std::string>kernelNames) { for (std::set<std::string>::iterator it = kernelNames.begin() ; it != kernelNames.end(); ++it) { if( kernelNameSet.count(*it) == 0 ) { //Add a new kernel. 
kernelNameSet.insert(*it); } } }; // Memory Allocation virtual HI_error_t HI_malloc1D(const void *hostPtr, void **devPtr, size_t count, int asyncID, HI_MallocKind_t flags=HI_MEM_READ_WRITE, int threadID=NO_THREAD_ID)= 0; virtual HI_error_t HI_malloc2D( const void *host_ptr, void** dev_ptr, size_t* pitch, size_t widthInBytes, size_t height, int asyncID, HI_MallocKind_t flags=HI_MEM_READ_WRITE, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_malloc3D( const void *host_ptr, void** dev_ptr, size_t* pitch, size_t widthInBytes, size_t height, size_t depth, int asyncID, HI_MallocKind_t flags=HI_MEM_READ_WRITE, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_free( const void *host_ptr, int asyncID, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_pin_host_memory(const void * hostPtr, size_t size, int threadID=NO_THREAD_ID)=0; virtual void HI_unpin_host_memory(const void* hostPtr, int threadID=NO_THREAD_ID)=0; // Memory Transfer virtual HI_error_t HI_memcpy(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_memcpy_async(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_memcpy_asyncS(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_memcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t widthInBytes, size_t height, HI_MemcpyKind_t kind, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_memcpy2D_async(void *dst, size_t dpitch, const void *src, size_t spitch, size_t widthInBytes, size_t height, HI_MemcpyKind_t kind, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID)=0; virtual void HI_tempMalloc1D( void** tempPtr, size_t count, acc_device_t devType, HI_MallocKind_t flags, int threadID=NO_THREAD_ID)=0; virtual void HI_tempFree( 
void** tempPtr, acc_device_t devType, int threadID=NO_THREAD_ID)=0; // Experimental API to support unified memory // virtual HI_error_t HI_malloc1D_unified(const void *hostPtr, void **devPtr, size_t count, int asyncID, HI_MallocKind_t flags, int threadID=NO_THREAD_ID)= 0; virtual HI_error_t HI_memcpy_unified(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int threadID=NO_THREAD_ID)=0; virtual HI_error_t HI_free_unified( const void *host_ptr, int asyncID, int threadID=NO_THREAD_ID)=0; virtual HI_error_t createKernelArgMap(int threadID=NO_THREAD_ID) { return HI_success; } virtual HI_error_t HI_bind_tex(std::string texName, HI_datatype_t type, const void *devPtr, size_t size) { return HI_success; } virtual HI_error_t HI_memcpy_const(void *hostPtr, std::string constName, HI_MemcpyKind_t kind, size_t count, int threadID=NO_THREAD_ID) { return HI_success; } virtual HI_error_t HI_memcpy_const_async(void *hostPtr, std::string constName, HI_MemcpyKind_t kind, size_t count, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID) { return HI_success; } virtual HI_error_t HI_present_or_memcpy_const(void *hostPtr, std::string constName, HI_MemcpyKind_t kind, size_t count, int threadID=NO_THREAD_ID) { return HI_success; } virtual void HI_set_async(int asyncId, int threadID=NO_THREAD_ID)=0; virtual void HI_set_context(int threadID=NO_THREAD_ID){} virtual void HI_wait(int arg, int threadID=NO_THREAD_ID) {} virtual void HI_wait_ifpresent(int arg, int threadID=NO_THREAD_ID) {} virtual void HI_wait_async(int arg, int async, int threadID=NO_THREAD_ID) {} virtual void HI_wait_async_ifpresent(int arg, int async, int threadID=NO_THREAD_ID) {} virtual void HI_waitS1(int arg, int threadID=NO_THREAD_ID) {} virtual void HI_waitS2(int arg, int threadID=NO_THREAD_ID) {} virtual void HI_wait_all(int threadID=NO_THREAD_ID) {} virtual void HI_wait_all_async(int async, int threadID=NO_THREAD_ID) {} virtual int HI_async_test(int asyncId, int 
threadID=NO_THREAD_ID)=0; virtual int HI_async_test_ifpresent(int asyncId, int threadID=NO_THREAD_ID)=0; virtual int HI_async_test_all(int threadID=NO_THREAD_ID)=0; virtual void HI_wait_for_events(int async, int num_waits, int* waits, int threadID=NO_THREAD_ID)=0; virtual void HI_malloc(void **devPtr, size_t size, HI_MallocKind_t flags, int threadID=NO_THREAD_ID) = 0; virtual void HI_free(void *devPtr, int threadID=NO_THREAD_ID) = 0; HI_error_t HI_get_device_address(const void *hostPtr, void **devPtr, int asyncID, int tid) { size_t offset; HI_error_t result = HI_get_device_address(hostPtr, devPtr, &offset, NULL, asyncID, tid); if( result == HI_success ) { //*devPtr contains a device pointer corresponding to the hostPtr. *devPtr = (void *)((size_t)*devPtr + offset); } else { *devPtr = NULL; } return result; } HI_error_t HI_get_device_address(const void *hostPtr, void **devPtrBase, size_t* offset, int asyncID, int tid) { return HI_get_device_address(hostPtr, devPtrBase, offset, NULL, asyncID, tid); } HI_error_t HI_get_device_address(const void *hostPtr, void **devPtrBase, size_t *offset, size_t *size, int asyncID, int tid) { bool emptyTable1 = false; #if PRESENT_TABLE_SEARCH_MODE == 0 bool emptyTable2 = false; #endif addresstable_t *masterAddressTable = masterAddressTableMap[tid]; int defaultAsyncID = DEFAULT_QUEUE+tid*MAX_NUM_QUEUES_PER_THREAD; addresstable_t::iterator it = masterAddressTable->find(asyncID); if(it == masterAddressTable->end() ) { addressmap_t* emptyMap = new addressmap_t(); masterAddressTable->insert(std::pair<int, addressmap_t *> (asyncID, emptyMap)); it = masterAddressTable->find(asyncID); emptyTable1 = true; } addressmap_t *tAddressMap = it->second; #if PRESENT_TABLE_SEARCH_MODE == 0 //Check whether hostPtr exists as an entry to addressTable (it->second), //which will be true if hostPtr is a base address of the pointed memory. 
addressmap_t::iterator it2 = tAddressMap->find(hostPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != tAddressMap->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if( offset ) *offset = 0; return HI_success; } else { //check on the default stream if( defaultAsyncID != asyncID ) { it = masterAddressTable->find(defaultAsyncID); if(it == masterAddressTable->end() ) { addressmap_t *emptyMap = new addressmap_t(); masterAddressTable->insert(std::pair<int, addressmap_t*> (defaultAsyncID, emptyMap)); //it = masterAddressTable->find(asyncID); emptyTable2 = true; } else { tAddressMap = it->second; it2 = tAddressMap->find(hostPtr); #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif if(it2 != tAddressMap->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if( offset ) *offset = 0; return HI_success; } } } else { emptyTable2 = emptyTable1; } } if( emptyTable1 && emptyTable2 ) { *devPtrBase = NULL; if( size ) *size = 0; if( offset ) *offset = 0; return HI_error; } //Check whether hostPtr is within the range of an allocated memory region //in the addressTable. 
it = masterAddressTable->find(asyncID); tAddressMap = it->second; for (addressmap_t::iterator it2 = tAddressMap->begin(); it2 != tAddressMap->end(); ++it2) { const void* aet_host = it2->first; #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_host && (size_t) hostPtr < (size_t) aet_host + aet->size) { *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if (offset) *offset = (size_t) hostPtr - (size_t) aet_host; return HI_success; } } //check on the default stream if( defaultAsyncID != asyncID ) { it = masterAddressTable->find(DEFAULT_QUEUE+tid*MAX_NUM_QUEUES_PER_THREAD); tAddressMap = it->second; for (addressmap_t::iterator it2 = tAddressMap->begin(); it2 != tAddressMap->end(); ++it2) { const void* aet_host = it2->first; #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_host && (size_t) hostPtr < (size_t) aet_host + aet->size) { *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if (offset) *offset = (size_t) hostPtr - (size_t) aet_host; return HI_success; } } } #else //Check whether hostPtr exists as an entry to addressTable (it->second), //which will be true if hostPtr is a base address of the pointed memory. addressmap_t::iterator it2 = tAddressMap->lower_bound(hostPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != tAddressMap->end() ) { if( it2->first == hostPtr ) { //found the entry matching the key, hostPtr. 
addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if( offset ) *offset = 0; return HI_success; } else { //hostPtr may belong to an entry before the current one. if( it2 == tAddressMap->begin() ) { //There is no entry before the current one. //return NULL; //Do not return here; check the default stream. } else { --it2; const void* aet_hostPtr = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if (offset) *offset = (size_t) hostPtr - (size_t) aet_hostPtr; return HI_success; } } } } else if( !tAddressMap->empty() ) { //hostPtr may belong to the last entry. addressmap_t::reverse_iterator it3 = tAddressMap->rbegin(); const void* aet_hostPtr = it3->first; addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if (offset) *offset = (size_t) hostPtr - (size_t) aet_hostPtr; return HI_success; } } if( asyncID != defaultAsyncID ) { //check on the default stream it = masterAddressTable->find(defaultAsyncID); if(it == masterAddressTable->end() ) { addressmap_t* emptyMap = new addressmap_t(); masterAddressTable->insert(std::pair<int, addressmap_t *> (defaultAsyncID, emptyMap)); it = masterAddressTable->find(defaultAsyncID); } tAddressMap = it->second; addressmap_t::iterator it2 = tAddressMap->lower_bound(hostPtr); #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif if(it2 != tAddressMap->end() ) { if( it2->first == hostPtr ) { //found the entry matching the key, hostPtr. 
addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if( offset ) *offset = 0; return HI_success; } else { //hostPtr may belong to an entry before the current one. if( it2 == tAddressMap->begin() ) { //There is no entry before the current one. *devPtrBase = NULL; if( size ) *size = 0; if( offset ) *offset = 0; return HI_error; } else { --it2; const void* aet_hostPtr = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if (offset) *offset = (size_t) hostPtr - (size_t) aet_hostPtr; return HI_success; } } } } else if( !tAddressMap->empty() ) { //hostPtr may belong to the last entry. addressmap_t::reverse_iterator it3 = tAddressMap->rbegin(); const void* aet_hostPtr = it3->first; addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if (offset) *offset = (size_t) hostPtr - (size_t) aet_hostPtr; return HI_success; } } } #endif //fprintf(stderr, "[ERROR in get_device_address()] No mapping found for the host pointer\n"); *devPtrBase = NULL; if( size ) *size = 0; if( offset ) *offset = 0; return HI_error; } HI_error_t HI_set_device_address(const void *hostPtr, void * devPtr, size_t size, int asyncID, int tid) { addresstable_t *masterAddressTable = masterAddressTableMap[tid]; addresstable_t::iterator it = masterAddressTable->find(asyncID); //fprintf(stderr, "[in set_device_address()] Setting address\n"); if(it == masterAddressTable->end() ) { //fprintf(stderr, "[in set_device_address()] No mapping found for the asyncID\n"); addressmap_t *emptyMap = new addressmap_t(); addresstable_entity_t *aet = new addresstable_entity_t(devPtr, size); (*emptyMap)[hostPtr] = (void *) aet; 
//emptyMap->insert(std::pair<const void*, void*>(hostPtr, (void *) aet)); masterAddressTable->insert(std::pair<int, addressmap_t *> (asyncID, emptyMap)); //it = masterAddressTable->find(asyncID); } else { addresstable_entity_t *aet = new addresstable_entity_t(devPtr, size); (*(it->second))[hostPtr] = (void*) aet; //(it->second)->insert(std::pair<const void*, void*>(hostPtr, (void*) aet)); } #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif return HI_success; } HI_error_t HI_remove_device_address(const void *hostPtr, int asyncID, int tid) { addresstable_t *masterAddressTable = masterAddressTableMap[tid]; addresstable_t::iterator it = masterAddressTable->find(asyncID); addressmap_t::iterator it2 = (it->second)->find(hostPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != (it->second)->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; delete aet; (it->second)->erase(it2); return HI_success; } else { fprintf(stderr, "[ERROR in remove_device_address()] No mapping found for the host pointer on async ID %d\n", asyncID); return HI_error; } } void HI_print_device_address_mapping_summary(int tid) { addresstable_t *masterAddressTable = masterAddressTableMap[tid]; memPool_t *memPool = memPoolMap[tid]; size_t num_table_entries = 0; size_t total_allocated_device_memory = 0; for (addresstable_t::iterator it = masterAddressTable->begin(); it != masterAddressTable->end(); ++it) { addressmap_t *tAddressMap = it->second; for (addressmap_t::iterator it2 = tAddressMap->begin(); it2 != tAddressMap->end(); ++it2) { addresstable_entity_t *aet = 
(addresstable_entity_t*) it2->second; total_allocated_device_memory += aet->size; num_table_entries++; } } fprintf(stderr, "[OPENARCRT-INFO]\t\t\tSummary of host-to-device-address mapping table for host thread %d\n", tid); fprintf(stderr, " \t\t\tNumber of mapping entries = %lu\n", num_table_entries); fprintf(stderr, " \t\t\tTotal allocated device memory = %lu\n", total_allocated_device_memory); num_table_entries = 0; total_allocated_device_memory = 0; for (memPool_t::iterator it = memPool->begin(); it != memPool->end(); ++it) { total_allocated_device_memory += it->first * memPool->count(it->first); num_table_entries++; } fprintf(stderr, "[OPENARCRT-INFO]\t\t\tSummary of device-memory pool table for host thread %d\n", tid); fprintf(stderr, " \t\t\tNumber of mapping entries = %lu\n", num_table_entries); fprintf(stderr, " \t\t\tTotal reserved device memory pool = %lu\n", total_allocated_device_memory); } void HI_print_device_address_mapping_entries(int tid) { addresstable_t *masterAddressTable = masterAddressTableMap[tid]; memPool_t *memPool = memPoolMap[tid]; addressmap_t *myHandleMap = masterHandleTable[tid]; fprintf(stderr, "[OPENARCRT-INFO]\t\t\tHost-to-device-address mapping table entries for host thread %d\n", tid); fprintf(stderr, " \t\t\tHostPtr\tDevPtr\n"); for (addresstable_t::iterator it = masterAddressTable->begin(); it != masterAddressTable->end(); ++it) { addressmap_t *tAddressMap = it->second; for (addressmap_t::iterator it2 = tAddressMap->begin(); it2 != tAddressMap->end(); ++it2) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; fprintf(stderr, " \t\t\t%lx\t%lx\n", (unsigned long)it2->first, (unsigned long)aet->basePtr); } } fprintf(stderr, "[OPENARCRT-INFO]\t\t\tDevPtr-to-MemHandle mapping table entries for host thread %d\n", tid); fprintf(stderr, " \t\t\tDevPtr\tMemHandle\n"); for(addressmap_t::iterator it = myHandleMap->begin(); it != myHandleMap->end(); ++it ) { addresstable_entity_t *aet = (addresstable_entity_t*) it->second; 
fprintf(stderr, " \t\t\t%lx\t%lx\n", (unsigned long)it->first, (unsigned long)aet->basePtr); } fprintf(stderr, "[OPENARCRT-INFO]\t\t\tDevPtr in the memory pool for host thread %d\n", tid); fprintf(stderr, " \t\t\tDevPtr\n"); for (memPool_t::iterator it = memPool->begin(); it != memPool->end(); ++it) { fprintf(stderr, " \t\t\t%lx\n", (unsigned long)it->second); } } HI_error_t HI_get_host_address(const void *devPtr, void** hostPtr, int asyncID, int tid) { addresstable_t *masterAddressTable = masterAddressTableMap[tid]; int defaultAsyncID = DEFAULT_QUEUE+tid*MAX_NUM_QUEUES_PER_THREAD; addresstable_t::iterator it = masterAddressTable->find(asyncID); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } } #endif if(it != masterAddressTable->end() ) { addressmap_t *tAddressMap = it->second; for( addressmap_t::iterator it3 = tAddressMap->begin(); it3 != tAddressMap->end(); ++it3 ) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if( aet->basePtr == devPtr ) { *hostPtr = (void *)it3->first; return HI_success; } else if (devPtr >= aet->basePtr && (size_t) devPtr < (size_t) aet->basePtr + aet->size) { *hostPtr = (void*) ((size_t) it3->first + ((size_t) devPtr - (size_t) aet->basePtr)); return HI_success; } } } if( asyncID != defaultAsyncID ) { //Check default queue. 
addresstable_t::iterator it = masterAddressTable->find(defaultAsyncID); if(it != masterAddressTable->end() ) { addressmap_t *tAddressMap = it->second; for( addressmap_t::iterator it3 = tAddressMap->begin(); it3 != tAddressMap->end(); ++it3 ) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if( aet->basePtr == devPtr ) { *hostPtr = (void *)it3->first; return HI_success; } else if (devPtr >= aet->basePtr && (size_t) devPtr < (size_t) aet->basePtr + aet->size) { *hostPtr = (void*) ((size_t) it3->first + ((size_t) devPtr - (size_t) aet->basePtr)); return HI_success; } } } } *hostPtr = NULL; return HI_error; } const void * HI_get_base_address_of_host_memory(const void *hostPtr, int asyncID, int tid) { addresstable_t *masterAddressTable = masterAddressTableMap[tid]; addresstable_t::iterator it = masterAddressTable->find(asyncID); int defaultAsyncID = DEFAULT_QUEUE+tid*MAX_NUM_QUEUES_PER_THREAD; if(it == masterAddressTable->end() ) { addressmap_t* emptyMap = new addressmap_t(); masterAddressTable->insert(std::pair<int, addressmap_t *> (asyncID, emptyMap)); it = masterAddressTable->find(asyncID); } addressmap_t *tAddressMap = it->second; #if PRESENT_TABLE_SEARCH_MODE == 0 //Check whether hostPtr exists as an entry to addressTable (it->second), //which will be true if hostPtr is a base address of the pointed memory. 
addressmap_t::iterator it2 = tAddressMap->find(hostPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } } #endif if(it2 != tAddressMap->end() ) { return hostPtr; } else { //check on the default stream it = masterAddressTable->find(defaultAsyncID); if(it == masterAddressTable->end() ) { addressmap_t* emptyMap = new addressmap_t(); masterAddressTable->insert(std::pair<int, addressmap_t *> (defaultAsyncID, emptyMap)); it = masterAddressTable->find(defaultAsyncID); } tAddressMap = it->second; it2 = tAddressMap->find(hostPtr); #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif if(it2 != tAddressMap->end() ) { return hostPtr; } } //Check whether hostPtr is within the range of an allocated memory region //in the addressTable. for (addressmap_t::iterator it2 = tAddressMap->begin(); it2 != tAddressMap->end(); ++it2) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif const void* aet_host = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_host && (size_t) hostPtr < (size_t) aet_host + aet->size) { return aet_host; } } if( asyncID != defaultAsyncID ) { //check on the default stream it = masterAddressTable->find(defaultAsyncID); tAddressMap = it->second; for (addressmap_t::iterator it2 = tAddressMap->begin(); it2 != tAddressMap->end(); ++it2) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif const void* aet_host = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_host && (size_t) hostPtr < (size_t) aet_host + aet->size) { return aet_host; } } } #else //Check whether hostPtr exists as an entry to addressTable (it->second), //which will be true if hostPtr is a base address of the pointed memory. 
addressmap_t::iterator it2 = tAddressMap->lower_bound(hostPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != tAddressMap->end() ) { if( it2->first == hostPtr ) { //found the entry matching the key, hostPtr. return hostPtr; } else { //hostPtr may belong to an entry before the current one. if( it2 == tAddressMap->begin() ) { //There is no entry before the current one. //return NULL; //Do not return here; check the default stream. } else { --it2; const void* aet_hostPtr = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { return aet_hostPtr; } } } } else if( !tAddressMap->empty() ) { //hostPtr may belong to the last entry. addressmap_t::reverse_iterator it3 = tAddressMap->rbegin(); const void* aet_hostPtr = it3->first; addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { return aet_hostPtr; } } if( asyncID != defaultAsyncID ) { //check on the default stream it = masterAddressTable->find(defaultAsyncID); if(it == masterAddressTable->end() ) { addressmap_t* emptyMap = new addressmap_t(); masterAddressTable->insert(std::pair<int, addressmap_t *> (defaultAsyncID, emptyMap)); it = masterAddressTable->find(defaultAsyncID); } tAddressMap = it->second; addressmap_t::iterator it2 = tAddressMap->lower_bound(hostPtr); #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif if(it2 != tAddressMap->end() ) { if( it2->first == hostPtr ) { //found the entry matching the key, hostPtr. return hostPtr; } else { //hostPtr may belong to an entry before the current one. 
if( it2 == tAddressMap->begin() ) { //There is no entry before the current one. return NULL; } else { --it2; const void* aet_hostPtr = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { return aet_hostPtr; } } } } else if( !tAddressMap->empty() ) { //hostPtr may belong to the last entry. addressmap_t::reverse_iterator it3 = tAddressMap->rbegin(); const void* aet_hostPtr = it3->first; addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if (hostPtr >= aet_hostPtr && (size_t) hostPtr < (size_t) aet_hostPtr + aet->size) { return aet_hostPtr; } } } #endif //No entry is found. return NULL; } HI_error_t HI_get_device_address_from_victim_cache(const void *hostPtr, void **devPtrBase, size_t *offset, size_t *size, int asyncID, int tid) { HI_error_t ret = HI_error; #ifdef _THREAD_SAFETY pthread_mutex_lock(&mutex_victim_cache); #else #ifdef _OPENMP #pragma omp critical (victim_cache_critical) #endif #endif { addresstable_t::iterator it = auxAddressTable.find(asyncID); //Check whether hostPtr exists as an entry to addressTable (it->second), //which will be true if hostPtr is a base address of the pointed memory. 
addressmap_t::iterator it2 = (it->second)->find(hostPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != (it->second)->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if( offset ) *offset = 0; //*devPtrBase = it2->second; ret = HI_success; } else { //check on the default stream it = auxAddressTable.find(DEFAULT_QUEUE+tid*MAX_NUM_QUEUES_PER_THREAD); it2 = (it->second)->find(hostPtr); if(it2 != (it->second)->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; *devPtrBase = aet->basePtr; if( size ) *size = aet->size; if( offset ) *offset = 0; // *devPtrBase = it2->second; ret = HI_success; } } } #ifdef _THREAD_SAFETY pthread_mutex_unlock(&mutex_victim_cache); #endif return ret; } HI_error_t HI_set_device_address_in_victim_cache (const void *hostPtr, void * devPtr, size_t size, int asyncID) { #ifdef _THREAD_SAFETY pthread_mutex_lock(&mutex_victim_cache); #else #ifdef _OPENMP #pragma omp critical (victim_cache_critical) #endif #endif { addresstable_t::iterator it = auxAddressTable.find(asyncID); if(it == auxAddressTable.end() ) { addressmap_t *emptyMap = new addressmap_t(); addresstable_entity_t *aet = new addresstable_entity_t(devPtr, size); (*emptyMap)[hostPtr] = (void *) aet; auxAddressTable.insert(std::pair<int, addressmap_t*> (asyncID, emptyMap)); } else { //(it->second).insert(std::pair<const void *,void*>(hostPtr, devPtr)); //(it->second)[hostPtr] = devPtr; addresstable_entity_t *aet = new addresstable_entity_t(devPtr, size); (*(it->second))[hostPtr] = (void*) aet; } } #ifdef _THREAD_SAFETY pthread_mutex_unlock(&mutex_victim_cache); #endif return HI_success; } HI_error_t HI_remove_device_address_from_victim_cache (const void *hostPtr, int 
asyncID) { HI_error_t ret; #ifdef _THREAD_SAFETY pthread_mutex_lock(&mutex_victim_cache); #else #ifdef _OPENMP #pragma omp critical (victim_cache_critical) #endif #endif { addresstable_t::iterator it = auxAddressTable.find(asyncID); addressmap_t::iterator it2 = (it->second)->find(hostPtr); if(it2 != (it->second)->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; delete aet; (it->second)->erase(it2); ret = HI_success; } else { fprintf(stderr, "[ERROR in remove_device_address_from_victim_cache()] No mapping found for the host pointer on async ID %d\n", asyncID); ret = HI_error; } } #ifdef _THREAD_SAFETY pthread_mutex_unlock(&mutex_victim_cache); #endif return ret; } HI_error_t HI_reset_victim_cache ( int asyncID ) { #ifdef _THREAD_SAFETY pthread_mutex_lock(&mutex_victim_cache); #else #ifdef _OPENMP #pragma omp critical (victim_cache_critical) #endif #endif { addresstable_t::iterator it = auxAddressTable.find(asyncID); while(it != auxAddressTable.end()) { for( addressmap_t::iterator it2 = (it->second)->begin(); it2 != (it->second)->end(); ++it2 ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; delete aet; } (it->second)->clear(); it++; } } #ifdef _THREAD_SAFETY pthread_mutex_unlock(&mutex_victim_cache); #endif return HI_success; } HI_error_t HI_reset_victim_cache_all ( ) { #ifdef _THREAD_SAFETY pthread_mutex_lock(&mutex_victim_cache); #else #ifdef _OPENMP #pragma omp critical (victim_cache_critical) #endif #endif for( addresstable_t::iterator it = auxAddressTable.begin(); it != auxAddressTable.end(); ++it ) { for( addressmap_t::iterator it2 = (it->second)->begin(); it2 != (it->second)->end(); ++it2 ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; delete aet; } (it->second)->clear(); } #ifdef _THREAD_SAFETY pthread_mutex_unlock(&mutex_victim_cache); #endif return HI_success; } HI_error_t HI_get_device_mem_handle(const void *devPtr, HI_device_mem_handle_t *memHandle, int tid) { return 
HI_get_device_mem_handle(devPtr, memHandle, NULL, tid); } HI_error_t HI_get_device_mem_handle(const void *devPtr, HI_device_mem_handle_t *memHandle, size_t *size, int tid) { addressmap_t *myHandleMap = masterHandleTable[tid]; #if PRESENT_TABLE_SEARCH_MODE == 0 //Check whether devPtr exists as an entry to myHandleMap, //which will be true if devPtr is a base address of the pointed memory. addressmap_t::iterator it2 = myHandleMap->find(devPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != myHandleMap->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; memHandle->basePtr = aet->basePtr; memHandle->offset = 0; if( size != NULL ) { *size = aet->size; } return HI_success; } //Check whether devPtr is within the range of an allocated memory region //in the addressTable. for (addressmap_t::iterator it2 = myHandleMap->begin(); it2 != myHandleMap->end(); ++it2) { const void* aet_devPtr = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 1 ) { ptit->second++; } #endif if (devPtr >= aet_devPtr && (size_t) devPtr < (size_t) aet_devPtr + aet->size) { memHandle->basePtr = aet->basePtr; memHandle->offset = (size_t) devPtr - (size_t) aet_devPtr; if( size != NULL ) { *size = aet->size; } return HI_success; } } #else //Check whether devPtr exists as an entry to myHandleMap, //which will be true if devPtr is a base address of the pointed memory. 
addressmap_t::iterator it2 = myHandleMap->lower_bound(devPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != myHandleMap->end() ) { if( it2->first == devPtr ) { //found the entry matching the key, devPtr. addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; memHandle->basePtr = aet->basePtr; memHandle->offset = 0; if( size != NULL ) { *size = aet->size; } return HI_success; } else { //devPtr may belong to an entry before the current one. if( it2 == myHandleMap->begin() ) { //There is no entry before the current one. memHandle->basePtr = NULL; memHandle->offset = 0; if( size != NULL ) { *size = 0; } return HI_error; } else { --it2; const void* aet_devPtr = it2->first; addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; if (devPtr >= aet_devPtr && (size_t) devPtr < (size_t) aet_devPtr + aet->size) { memHandle->basePtr = aet->basePtr; memHandle->offset = (size_t) devPtr - (size_t) aet_devPtr; if( size != NULL ) { *size = aet->size; } return HI_success; } } } } else if( !myHandleMap->empty() ) { //devPtr may belong to the last entry. 
addressmap_t::reverse_iterator it3 = myHandleMap->rbegin(); const void* aet_devPtr = it3->first; addresstable_entity_t *aet = (addresstable_entity_t*) it3->second; if (devPtr >= aet_devPtr && (size_t) devPtr < (size_t) aet_devPtr + aet->size) { memHandle->basePtr = aet->basePtr; memHandle->offset = (size_t) devPtr - (size_t) aet_devPtr; if( size != NULL ) { *size = aet->size; } return HI_success; } } #endif //fprintf(stderr, "[ERROR in get_device_mem_handle()] No mapping found for the device pointer\n"); memHandle->basePtr = NULL; memHandle->offset = 0; if( size != NULL ) { *size = 0; } return HI_error; } HI_error_t HI_set_device_mem_handle(const void *devPtr, void * handle, size_t size, int tid) { addressmap_t *myHandleMap = masterHandleTable[tid]; //fprintf(stderr, "[in set_device_mem_handle()] Setting address\n"); addresstable_entity_t *aet = new addresstable_entity_t(handle, size); (*myHandleMap)[devPtr] = (void*) aet; //myHandleMap->insert(std::pair<const void*, void*>(devPtr, (void*) aet)); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif return HI_success; } HI_error_t HI_remove_device_mem_handle(const void *devPtr, int tid) { addressmap_t *myHandleMap = masterHandleTable[tid]; addressmap_t::iterator it2 = myHandleMap->find(devPtr); #ifdef _OPENARC_PROFILE_ presenttablecnt_t::iterator ptit = presentTableCntMap.find(tid); if( HI_openarcrt_verbosity > 1 ) { if(ptit == presentTableCntMap.end()) { presentTableCntMap.insert(std::pair<int, long> (tid, 0)); } ptit->second++; } #endif if(it2 != myHandleMap->end() ) { addresstable_entity_t *aet = (addresstable_entity_t*) it2->second; delete aet; myHandleMap->erase(it2); return HI_success; } else { fprintf(stderr, "[ERROR in remove_device_mem_handle()] No mapping found for the device pointer\n"); return HI_error; } } 
HI_error_t HI_free_async( const void *hostPtr, int asyncID, int tid) { //fprintf(stderr, "[in HI_free_async()] with asyncID %d\n", asyncID); asyncfreetable_t *postponedFreeTable = postponedFreeTableMap[tid]; postponedFreeTable->insert(std::pair<int, const void *>(asyncID, hostPtr)); return HI_success; } HI_error_t HI_postponed_free(int asyncID, int tid) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 3 ) { fprintf(stderr, "[OPENARCRT-INFO]\t\t\tenter HI_postponed_free()\n"); } #endif asyncfreetable_t *postponedFreeTable = postponedFreeTableMap[tid]; std::multimap<int, const void*>::iterator hostPtrIter = postponedFreeTable->find(asyncID); while(hostPtrIter != postponedFreeTable->end()) { //fprintf(stderr, "[in HI_postponed_free()] Freeing on stream %d, address %x\n", asyncID, hostPtrIter->second); HI_free(hostPtrIter->second, asyncID, tid); hostPtrIter++; } postponedFreeTable->erase(asyncID); #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 3 ) { fprintf(stderr, "[OPENARCRT-INFO]\t\t\texit HI_postponed_free()\n"); } #endif return HI_success; } HI_error_t HI_tempFree_async( void **tempPtr, acc_device_t devType, int asyncID, int tid) { //fprintf(stderr, "[in HI_tempFree_async()] with asyncID %d\n", asyncID); if( postponedTempFreeTableMap.count(tid) == 0 ) { fprintf(stderr, "[ERROR in HI_tempFree_async()] No mapping found for thread ID = %d\n", tid); } else { asynctempfreetable_t *postponedTempFreeTable = postponedTempFreeTableMap[tid]; postponedTempFreeTable->insert(std::pair<int, void **>(asyncID, tempPtr)); asynctempfreetable2_t *postponedTempFreeTable2 = postponedTempFreeTableMap2[tid]; postponedTempFreeTable2->insert(std::pair<int, acc_device_t>(asyncID, devType)); } return HI_success; } HI_error_t HI_postponed_tempFree(int asyncID, acc_device_t devType, int tid) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 3 ) { fprintf(stderr, "[OPENARCRT-INFO]\t\t\tenter HI_postponed_tempFree(devType = %d, thread ID = %d)\n", devType, tid); } #endif if( 
postponedTempFreeTableMap.count(tid) == 0 ) { fprintf(stderr, "[ERROR in HI_postponed_tempFree()] No mapping found for thread ID = %d\n", tid); } else { asynctempfreetable_t *postponedTempFreeTable = postponedTempFreeTableMap[tid]; std::multimap<int, void**>::iterator tempPtrIter = postponedTempFreeTable->find(asyncID); asynctempfreetable2_t *postponedTempFreeTable2 = postponedTempFreeTableMap2[tid]; std::multimap<int, acc_device_t>::iterator tempPtrIter2 = postponedTempFreeTable2->find(asyncID); while((tempPtrIter != postponedTempFreeTable->end()) && (tempPtrIter2 != postponedTempFreeTable2->end())) { //fprintf(stderr, "[in HI_postponed_TempFree()] Freeing on stream %d, address %x\n", asyncID, tempPtrIter->second); HI_tempFree(tempPtrIter->second, tempPtrIter2->second, tid); tempPtrIter++; tempPtrIter2++; } if(tempPtrIter != postponedTempFreeTable->end()) { fprintf(stderr, "[ERROR in HI_postponed_tempFree()] postponedTempFreeTable has more entries for thread ID = %d\n", tid); } if(tempPtrIter2 != postponedTempFreeTable2->end()) { fprintf(stderr, "[ERROR in HI_postponed_tempFree()] postponedTempFreeTable2 has more entries for thread ID = %d\n", tid); } postponedTempFreeTable->erase(asyncID); postponedTempFreeTable2->erase(asyncID); } #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 3 ) { fprintf(stderr, "[OPENARCRT-INFO]\t\t\texit HI_postponed_tempFree(devType = %d, thread ID = %d)\n", devType, tid); } #endif return HI_success; } HI_error_t HI_get_temphost_address(const void *hostPtr, void **temphostPtr, int asyncID, int tid) { addresstable_t::iterator it = tempHostAddressTable.find(asyncID); addressmap_t::iterator it2 = (it->second)->find(hostPtr); if(it2 != (it->second)->end() ) { *temphostPtr = it2->second; return HI_success; } else { //check on the default stream it = tempHostAddressTable.find(DEFAULT_QUEUE+tid*MAX_NUM_QUEUES_PER_THREAD); it2 = (it->second)->find(hostPtr); if(it2 != (it->second)->end() ) { *temphostPtr = it2->second; return HI_success; } 
//fprintf(stderr, "[ERROR in get_temphost_address()] No mapping found for the host pointer\n"); return HI_error; } } HI_error_t HI_set_temphost_address(const void *hostPtr, void * temphostPtr, int asyncID) { addresstable_t::iterator it = tempHostAddressTable.find(asyncID); //fprintf(stderr, "[in set_temphost_address()] Setting address\n"); if(it == tempHostAddressTable.end() ) { //fprintf(stderr, "[in set_temphost_address()] No mapping found for the asyncID\n"); addressmap_t * emptyMap = new addressmap_t(); tempHostAddressTable.insert(std::pair<int, addressmap_t*> (asyncID, emptyMap)); it = tempHostAddressTable.find(asyncID); } //(it->second).insert(std::pair<const void *,void*>(hostPtr, temphostPtr)); (*(it->second))[hostPtr] = temphostPtr; return HI_success; } HI_error_t HI_remove_temphost_address(const void *hostPtr, int asyncID) { addresstable_t::iterator it = tempHostAddressTable.find(asyncID); if( it != tempHostAddressTable.end() ) { addressmap_t::iterator it2 = (it->second)->find(hostPtr); if(it2 != (it->second)->end() ) { (it->second)->erase(it2); return HI_success; } else { fprintf(stderr, "[ERROR in remove_temphost_address()] No mapping found for the host pointer on async ID %d\n", asyncID); return HI_error; } } else { fprintf(stderr, "[ERROR in remove_temphost_address()] No mapping found for the host pointer on async ID %d\n", asyncID); return HI_error; } } void HI_free_temphosts(int asyncID ) { #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 3 ) { fprintf(stderr, "[OPENARCRT-INFO]\t\t\tenter HI_free_temphosts()\n"); } #endif addresstable_t::iterator it = tempHostAddressTable.find(asyncID); if (it != tempHostAddressTable.end()) { for( addressmap_t::iterator it2 = (it->second)->begin(); it2 != (it->second)->end(); ++it2 ) { HI_tempFree(&(it2->second), acc_device_host); } (it->second)->clear(); } #ifdef _OPENARC_PROFILE_ if( HI_openarcrt_verbosity > 3 ) { fprintf(stderr, "[OPENARCRT-INFO]\t\t\texit HI_free_temphosts()\n"); } #endif } char * 
deblank(char *str) { char *out = str, *put = str; for(; *str != '\0'; ++str) { if((*str != ' ') && (*str != ':') && (*str != '(') && (*str != ')') && (*str != '[') && (*str != ']') && (*str != '<') && (*str != '>')) { *put++ = *str; } } *put = '\0'; return out; } } Accelerator_t; /////////////////////////////////////////////////// // Overloaded OpenACC runtime API from openacc.h // /////////////////////////////////////////////////// /////////////////////////////////////////// // OpenACC V1.0 Runtime Library Routines // /////////////////////////////////////////// extern void acc_init( acc_device_t devtype, int kernels, std::string kernelNames[], const char *fileNameBase = "openarc_kernel", int threadID=NO_THREAD_ID); extern int acc_get_num_devices( acc_device_t devtype, int threadID ); extern void acc_set_device_type( acc_device_t devtype, int threadID ); extern acc_device_t acc_get_device_type(int threadID); extern void acc_set_device_num( int devnum, acc_device_t devtype, int threadID ); extern int acc_get_device_num( acc_device_t devtype, int threadID ); extern int acc_async_test( int asyncID, int threadID ); extern int acc_async_test_all(int threadID); extern void acc_async_wait( int asyncID, int threadID ); //renamed to acc_wait() extern void acc_async_wait_all(int threadID); //renamed to acc_wait_all() extern void acc_shutdown( acc_device_t devtype, int threadID ); extern int acc_on_device( acc_device_t devtype, int threadID ); extern d_void* acc_malloc(size_t size, int threadID); extern void acc_free(d_void* devPtr, int threadID); /////////////////////////////////////////////////////////// // OpenACC Runtime Library Routines added in Version 2.0 // /////////////////////////////////////////////////////////// //acc_async_wait() and acc_async_wait_all() are renamed to acc_wait() and //acc_wait_all() in V2.0. 
// Synchronization routines (OpenACC 2.0 spellings of the V1.0 acc_async_wait*).
extern void acc_wait( int arg, int threadID );
extern void acc_wait_all(int threadID);
extern void acc_wait_async(int arg, int async, int threadID);
extern void acc_wait_all_async(int async, int threadID);
// Data-management routines added in OpenACC 2.0.
extern void* acc_copyin(h_void* hostPtr, size_t size, int threadID);
extern void* acc_copyin_async(h_void* hostPtr, size_t size, int async, int threadID);
extern void* acc_pcopyin(h_void* hostPtr, size_t size, int threadID);
extern void* acc_present_or_copyin(h_void* hostPtr, size_t size, int threadID);
extern void* acc_create(h_void* hostPtr, size_t size, int threadID);
extern void* acc_create_async(h_void* hostPtr, size_t size, int async, int threadID);
extern void* acc_pcreate(h_void* hostPtr, size_t size, int threadID);
extern void* acc_present_or_create(h_void* hostPtr, size_t size, int threadID);
extern void acc_copyout(h_void* hostPtr, size_t size, int threadID);
extern void acc_copyout_async(h_void* hostPtr, size_t size, int async, int threadID);
extern void acc_delete(h_void* hostPtr, size_t size, int threadID);
extern void acc_delete_async(h_void* hostPtr, size_t size, int async, int threadID);
extern void acc_update_device(h_void* hostPtr, size_t size, int threadID);
extern void acc_update_device_async(h_void* hostPtr, size_t size, int async, int threadID);
extern void acc_update_self(h_void* hostPtr, size_t size, int threadID);
extern void acc_update_self_async(h_void* hostPtr, size_t size, int async, int threadID);
extern void acc_map_data(h_void* hostPtr, d_void* devPtr, size_t size, int threadID);
extern void acc_unmap_data(h_void* hostPtr, int threadID);
extern d_void* acc_deviceptr(h_void* hostPtr, int threadID);
extern h_void* acc_hostptr(d_void* devPtr, int threadID);
extern int acc_is_present(h_void* hostPtr, size_t size, int threadID);
extern void acc_memcpy_to_device(d_void* dest, h_void* src, size_t bytes, int threadID);
extern void acc_memcpy_from_device(h_void* dest, d_void* src, size_t bytes, int threadID);

///////////////////////////////////////////////////////////
// OpenACC Runtime Library Routines added in Version 2.5 //
///////////////////////////////////////////////////////////
extern void acc_memcpy_device(d_void* dest, d_void* src, size_t bytes, int threadID);
extern void acc_memcpy_device_async(d_void* dest, d_void* src, size_t bytes, int async, int threadID);

///////////////////////////////////////////////////////////
// OpenACC Runtime Library Routines added in Version 2.6 //
///////////////////////////////////////////////////////////
extern void acc_attach(h_void** hostPtr, int threadID);
extern void acc_attach_async(h_void** hostPtr, int async, int threadID);
extern void acc_detach(h_void** hostPtr, int threadID);
extern void acc_detach_async(h_void** hostPtr, int async, int threadID);
extern void acc_detach_finalize(h_void** hostPtr, int threadID);
extern void acc_detach_finalize_async(h_void** hostPtr, int async, int threadID);
//extern size_t acc_get_property(int devicenum, acc_device_t devicetype, acc_device_property_t property, int threadID);
//extern const char* acc_get_property_string(int devicenum, acc_device_t devicetype, acc_device_property_t property, int threadID);

//////////////////////////////////////////////////////////////////////
// Experimental OpenACC Runtime Library Routines for Unified Memory //
// (Currently, these work only for specific versions of CUDA GPUs.) //
//////////////////////////////////////////////////////////////////////
extern void* acc_copyin_unified(h_void* hostPtr, size_t size, int threadID);
extern void* acc_pcopyin_unified(h_void* hostPtr, size_t size, int threadID);
extern void* acc_present_or_copyin_unified(h_void* hostPtr, size_t size, int threadID);
extern void* acc_create_unified(h_void* hostPtr, size_t size, int threadID);
extern void* acc_pcreate_unified(h_void* hostPtr, size_t size, int threadID);
extern void* acc_present_or_create_unified(h_void* hostPtr, size_t size, int threadID);
extern void acc_copyout_unified(h_void* hostPtr, size_t size, int threadID);
extern void acc_delete_unified(h_void* hostPtr, size_t size, int threadID);

/////////////////////////////////////////////////////////////////
// Additional OpenACC Runtime Library Routines Used by OpenARC //
/////////////////////////////////////////////////////////////////
extern void* acc_copyin_const(h_void* hostPtr, size_t size, int threadID);
extern void* acc_pcopyin_const(h_void* hostPtr, size_t size, int threadID);
extern void* acc_present_or_copyin_const(h_void* hostPtr, size_t size, int threadID);
extern void* acc_create_const(h_void* hostPtr, size_t size, int threadID);
extern void* acc_pcreate_const(h_void* hostPtr, size_t size, int threadID);
extern void* acc_present_or_create_const(h_void* hostPtr, size_t size, int threadID);
extern void* acc_copyin_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void* acc_pcopyin_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void* acc_present_or_copyin_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void* acc_create_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void* acc_pcreate_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void* acc_present_or_create_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void acc_copyout_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);
extern void acc_delete_async_wait(h_void* hostPtr, size_t size, int async, int arg, int threadID);

////////////////////////
// Runtime init/reset //
////////////////////////
extern void HI_hostinit(int threadID);

//////////////////////
// Kernel Execution //
//////////////////////
//Set the number of arguments to be passed to a kernel.
extern HI_error_t HI_register_kernel_numargs(std::string kernel_name, int num_args, int threadID=NO_THREAD_ID);
//Register an argument to be passed to a kernel.
extern HI_error_t HI_register_kernel_arg(std::string kernel_name, int arg_index, size_t arg_size, void *arg_value, int arg_type, int threadID=NO_THREAD_ID);
//Launch a kernel.
extern HI_error_t HI_kernel_call(std::string kernel_name, size_t gridSize[3], size_t blockSize[3], int async=DEFAULT_QUEUE, int num_waits=0, int *waits=NULL, int threadID=NO_THREAD_ID);
extern HI_error_t HI_synchronize( int forcedSync = 0, int threadID=NO_THREAD_ID);

/////////////////////////////
//Device Memory Allocation //
/////////////////////////////
extern HI_error_t HI_malloc1D( const void *hostPtr, void** devPtr, size_t count, int asyncID, HI_MallocKind_t flags=HI_MEM_READ_WRITE, int threadID=NO_THREAD_ID);
extern HI_error_t HI_malloc2D( const void *hostPtr, void** devPtr, size_t* pitch, size_t widthInBytes, size_t height, int asyncID, HI_MallocKind_t flags=HI_MEM_READ_WRITE, int threadID=NO_THREAD_ID);
extern HI_error_t HI_malloc3D( const void *hostPtr, void** devPtr, size_t* pitch, size_t widthInBytes, size_t height, size_t depth, int asyncID, HI_MallocKind_t flags=HI_MEM_READ_WRITE, int threadID=NO_THREAD_ID);
extern HI_error_t HI_free( const void *hostPtr, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_free_async( const void *hostPtr, int asyncID, int threadID=NO_THREAD_ID);
extern void HI_tempMalloc1D( void** tempPtr, size_t count, acc_device_t devType, HI_MallocKind_t flags, int threadID=NO_THREAD_ID);
extern void HI_tempFree( void** tempPtr, acc_device_t devType, int threadID=NO_THREAD_ID);
extern void HI_tempFree_async( void** tempPtr, acc_device_t devType, int asyncID, int threadID=NO_THREAD_ID);

/////////////////////////////////////////////////
//Memory transfers between a host and a device //
/////////////////////////////////////////////////
extern HI_error_t HI_memcpy(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy_async(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy_asyncS(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t widthInBytes, size_t height, HI_MemcpyKind_t kind, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy2D_async(void *dst, size_t dpitch, const void *src, size_t spitch, size_t widthInBytes, size_t height, HI_MemcpyKind_t kind, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID);
//extern HI_error_t HI_memcpy3D(void *dst, size_t dpitch, const void *src, size_t spitch,
//	size_t widthInBytes, size_t height, size_t depth, HI_MemcpyKind_t kind, int threadID=NO_THREAD_ID);
//extern HI_error_t HI_memcpy3D_async(void *dst, size_t dpitch, const void *src,
//	size_t spitch, size_t widthInBytes, size_t height, size_t depth,
//	HI_MemcpyKind_t kind, int async, int num_waits=0, int *waits=NULL, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy_const(void *hostPtr, std::string constName, HI_MemcpyKind_t kind, size_t count, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy_const_async(void *hostPtr, std::string constName, HI_MemcpyKind_t kind, size_t count, int async, int num_waits, int *waits, int threadID=NO_THREAD_ID);
extern HI_error_t HI_present_or_memcpy_const(void *hostPtr, std::string constName, HI_MemcpyKind_t kind, size_t count, int threadID=NO_THREAD_ID);

////////////////////////////////////////////////
// Experimental API to support unified memory //
////////////////////////////////////////////////
extern HI_error_t HI_malloc1D_unified( const void *hostPtr, void** devPtr, size_t count, int asyncID, HI_MallocKind_t flags, int threadID=NO_THREAD_ID);
extern HI_error_t HI_memcpy_unified(void *dst, const void *src, size_t count, HI_MemcpyKind_t kind, int trType, int threadID=NO_THREAD_ID);
extern HI_error_t HI_free_unified( const void *hostPtr, int asyncID, int threadID=NO_THREAD_ID);

////////////////////////////
//Internal mapping tables //
////////////////////////////
extern HI_error_t HI_get_device_address(const void * hostPtr, void ** devPtr, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_get_device_address(const void * hostPtr, void ** devPtrBase, size_t * offset, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_get_device_address(const void * hostPtr, void ** devPtrBase, size_t * offset, size_t * size, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_set_device_address(const void * hostPtr, void * devPtr, size_t size, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_remove_device_address(const void * hostPtr, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_get_host_address(const void *devPtr, void** hostPtr, int asyncID, int threadID=NO_THREAD_ID);
extern HI_error_t HI_get_temphost_address(const void * hostPtr, void ** temphostPtr, int asyncID, int threadID=NO_THREAD_ID);
//extern HI_error_t HI_set_temphost_address(const void * hostPtr, void * temphostPtr, int asyncID, int threadID=NO_THREAD_ID);
//extern HI_error_t HI_remove_temphost_address(const void * hostPtr, int threadID=NO_THREAD_ID);
//Get and increase an internal reference counter of the present table mapping for the host variable. (It also returns the corresponding device pointer.)
extern int HI_getninc_prtcounter(const void * hostPtr, void **devPtr, int asyncID, int threadID=NO_THREAD_ID);
//Decrease and get an internal reference counter of the present table mapping for the host variable. (It also returns the corresponding device pointer.)
extern int HI_decnget_prtcounter(const void * hostPtr, void **devPtr, int asyncID, int threadID=NO_THREAD_ID);

/////////////////////////////////////////////////////////////////////////
//async integer argument => internal handler (ex: CUDA stream) mapping //
/////////////////////////////////////////////////////////////////////////
//extern HI_error_t HI_create_async_handle( int async, int threadID=NO_THREAD_ID);
//extern int HI_contain_async_handle( int async , int threadID=NO_THREAD_ID);
//extern HI_error_t HI_delete_async_handle( int async , int threadID=NO_THREAD_ID);
extern void HI_set_async(int asyncId, int threadID=NO_THREAD_ID);
extern void HI_set_context(int threadID=NO_THREAD_ID);

////////////////////////////////
//Memory management functions //
////////////////////////////////
extern void HI_check_read(const void * hostPtr, acc_device_t dtype, const char *varName, const char *refName, int loopIndex, int threadID=NO_THREAD_ID);
extern void HI_check_write(const void * hostPtr, acc_device_t dtype, const char *varName, const char *refName, int loopIndex, int threadID=NO_THREAD_ID);
extern void HI_set_status(const void * hostPtr, acc_device_t dtype, HI_memstatus_t status, const char * varName, const char * refName, int loopIndex, int threadID=NO_THREAD_ID);
extern void HI_reset_status(const void * hostPtr, acc_device_t dtype, HI_memstatus_t status, int asyncID, int threadID=NO_THREAD_ID);
//Below is deprecated
extern void HI_init_status(const void * hostPtr, int threadID=NO_THREAD_ID);

////////////////////
//Texture function //
////////////////////
extern HI_error_t HI_bind_tex(std::string texName, HI_datatype_t type, const void *devPtr, size_t size, int threadID=NO_THREAD_ID);

////////////////////
//Misc. functions //
////////////////////
extern double HI_get_localtime();
extern const char* HI_get_device_type_string( acc_device_t devtype );

////////////////////////////////////////////
//Functions used for program verification //
////////////////////////////////////////////
extern void HI_waitS1(int asyncId, int threadID=NO_THREAD_ID);
extern void HI_waitS2(int asyncId, int threadID=NO_THREAD_ID);

///////////////////////////////////////////
//Functions used for OpenMP4 translation //
///////////////////////////////////////////
#include "omp_helper.h"

///////////////////////////////////////
//Functions used for resilience test //
///////////////////////////////////////
#include "resilience.h"

#endif
softplus_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"

#include <math.h>

#if defined(__APPLE__) || defined(_MSC_VER)
#include <stdio.h>
#endif

/* Reference fp32 softplus: dst[i] = log(exp(src[i]) + 1), element-wise.
 *
 * The tensor is assumed to be 4-D NCHW (the original code already read
 * dims[3]); output has the same shape as input (see reshape() below).
 *
 * Fixes over the previous version:
 *   - dims[0] (batch) was ignored, so only the first image of a batch was
 *     processed; we now iterate over batch * channels planes.
 *   - exp(x) overflows to +inf for large x, turning the result into inf
 *     even though softplus(x) == x to float precision there; large inputs
 *     are passed through unchanged (identical after float rounding for
 *     any x > 88).
 *
 * Returns 0 on success.
 */
int ref_softplus_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int batch = input_tensor->dims[0];
    int channels = input_tensor->dims[1];
    int h = input_tensor->dims[2];
    int w = input_tensor->dims[3];
    int plane_size = h * w;
    /* process every (batch, channel) plane, not just the first batch */
    int planes = batch * channels;

    float* input_data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < planes; q++)
    {
        float* src = input_data + (size_t)plane_size * q;
        float* dst = out_data + (size_t)plane_size * q;
        for (int i = 0; i < plane_size; i++)
        {
            double x = src[i];
            /* exp(x) overflows for x > ~709; softplus(x) rounds to x in
             * float for any x > 88, so pass large inputs through. */
            dst[i] = (x > 88.0) ? src[i] : (float)log(exp(x) + 1.0);
        }
    }
    return 0;
}

/* No per-node state to allocate. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch on data type; only fp32 is implemented by this reference op. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_softplus_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else
        printf("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

/* Softplus is shape-preserving: copy the input shape to the output. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* ir_graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);

    return ret;
}

/* Reference implementation: always a viable (low-priority) candidate. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {
    .prerun = NULL,
    .run = run,
    .reshape = reshape,
    .postrun = NULL,
    .init_node = init_node,
    .release_node = release_node,
    .score = score};

int register_softplus_ref_op()
{
    return register_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}

int unregister_softplus_ref_op()
{
    return unregister_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.  One row of a 3x3 linear color transform; x/y/z are
  the weights applied to the red, green, and blue inputs respectively.
*/
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
  Forward declarations.
*/
static MagickBooleanType
  TransformsRGBImage(Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t I m a g e C o l o r s p a c e T y p e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageColorspaceType() returns the potential type of image:
%  sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
%  To ensure the image type matches its potential, use SetImageColorspaceType():
%
%      (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
%        exception);
%
%  The format of the GetImageColorspaceType method is:
%
%      ColorspaceType GetImageColorspaceType(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Start from the declared colorspace, then downgrade to GRAY when the
     pixel data is actually grayscale. */
  colorspace=image->colorspace;
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     s R G B T r a n s f o r m I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Adobe RGB (1998) -> sRGB, via CIE XYZ as the connecting space. */
static inline void ConvertAdobe98ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Display P3 -> sRGB, via CIE XYZ. */
static inline void ConvertDisplayP3ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* ProPhoto RGB -> sRGB, via CIE XYZ. */
static inline void ConvertProPhotoToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* RGB -> CMY: each channel is the normalized complement of the input. */
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* sRGB -> Adobe RGB (1998), via CIE XYZ. */
static void ConvertRGBToAdobe98(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}

/* sRGB -> Display P3, via CIE XYZ. */
static void ConvertRGBToDisplayP3(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}

/* sRGB -> ProPhoto RGB, via CIE XYZ. */
static void ConvertRGBToProPhoto(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}

/* CIE XYZ -> LMS cone response (3x3 linear transform). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* sRGB -> LMS, via CIE XYZ. */
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* sRGB -> CIE L*u*v* for the given reference illuminant, via CIE XYZ. */
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,const IlluminantType illuminant,double *L,double *u,
  double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}

/* sRGB -> CIE xyY chromaticity + luminance; PerceptibleReciprocal() guards
   the X+Y+Z == 0 (pure black) case. */
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

/* CIE XYZ -> Jzazbz; az and bz are biased by 0.5 so the nominal range maps
   into [0..1]. */
static void inline ConvertXYZToJzazbz(const double X,const double Y,
  const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b  1.15  /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g  0.66
#define Jzazbz_c1  (3424.0/4096.0)
#define Jzazbz_c2  (2413.0/128.0)
#define Jzazbz_c3  (2392.0/128.0)
#define Jzazbz_n  (2610.0/16384.0)
#define Jzazbz_p  (1.7*2523.0/32.0)
#define Jzazbz_d  (-0.56)
#define Jzazbz_d0  (1.6295499532821566e-11)

  double
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  /* Chromatic-adaptation pre-step, then LMS-like transform. */
  Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
  Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
  Zp=Z;
  L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
  M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
  S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
  /* PQ-style non-linearity on each channel, normalized by the display
     white luminance. */
  gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  Iz=0.5*Lp+0.5*Mp;
  *az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
  *bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
  *Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}

/* Inverse of ConvertXYZToJzazbz(). */
static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
  double
    azz,
    bzz,
gamma, Iz, L, Lp, M, Mp, S, Sp, Xp, Yp, Zp; gamma=Jz+Jzazbz_d0; Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0); azz=az-0.5; bzz=bz-0.5; Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz; Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz; Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz; gamma=pow(Lp,1.0/Jzazbz_p); L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); gamma=pow(Mp,1.0/Jzazbz_p); M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); gamma=pow(Sp,1.0/Jzazbz_p); S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S; Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S; Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S; *X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b; *Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g; *Z=Zp; } static void ConvertRGBToJzazbz(const double red,const double green, const double blue,const double white_luminance,double *Jz,double *az, double *bz) { double X, Y, Z; ConvertRGBToXYZ(red,blue,green,&X,&Y,&Z); ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz); } static void ConvertJzazbzToRGB(const double Jz,const double az, const double bz,const double white_luminance,double *red,double *green, double *blue) { double X, Y, Z; ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,blue,green); } static void ConvertRGBToYDbDr(const double red,const double green, const double blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const double red,const double green, const double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; 
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

/* sRGB -> YPbPr; Pb and Pr are biased by 0.5 into [0..1]. */
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

/* YCbCr here uses the same analog YPbPr weights (no digital offset). */
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

/* sRGB -> YUV; U and V are biased by 0.5 into [0..1]. */
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

/* Convert the image pixels in place from sRGB to the requested colorspace;
   returns MagickTrue on success.  The colorspace must not be sRGB,
   Transparent, or Undefined (asserted below). */
static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  IlluminantType
    illuminant = D65Illuminant;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  /* The "color:illuminant" artifact overrides the default D65 reference
     white for the Lab/Luv/LCH family of conversions. */
  artifact=GetImageArtifact(image,"color:illuminant");
  if (artifact != (const char *) NULL)
    {
      illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
        MagickFalse,artifact);
      if ((ssize_t) illuminant < 0)
        illuminant=UndefinedIlluminant;
    }
  status=MagickTrue;
  progress=0;
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;

      /*
        Convert RGB to CMYK colorspace.
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158* DecodePixelGamma(GetPixelGreen(image,q))+0.072186* DecodePixelGamma(GetPixelBlue(image,q)); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case Adobe98Colorspace: case DisplayP3Colorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case JzazbzColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case ProPhotoColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { const char *value; double white_luminance; /* Transform image from sRGB to target colorspace. 
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case Adobe98Colorspace: { ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z); break; } case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case DisplayP3Colorspace: { ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case JzazbzColorspace: { ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z); break; } case LabColorspace: { 
ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z); break; } case ProPhotoColorspace: { ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002* PerceptibleReciprocal(film_gamma)))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) 
GetPixelGreen(image,q)); blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if 
(SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; x_map[i].y=(-0.003296)*i; x_map[i].z=0.009410*i; y_map[i].x=0.010566*i; y_map[i].y=(-0.006471)*i; y_map[i].z=(-0.007880)*i; z_map[i].x=0.002052*i; z_map[i].y=0.009768*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); x_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].x=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; z_map[i].y=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum *magick_restrict q; ssize_t x; unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,sRGBTransformImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { unsigned int blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red); image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green); image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    No-op if the image is already tagged with the requested colorspace.
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  /*
    Non-linear default gamma (1/2.2); overridden below for linear
    colorspaces.
  */
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear RGB / XYZ / xyY: unit gamma */
    else
      {
        /*
          All other colorspaces: perceptual intent with the sRGB primaries
          and the D65 white point (x/y chromaticity coordinates).
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  /*
    Propagate the new metadata to the pixel cache before restoring the type.
  */
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and
blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);  /* already typed as gray; nothing to do */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Honor the "colorspace:auto-grayscale" property: when explicitly false,
    never auto-convert to grayscale.
  */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  type=IdentifyImageGray(image,exception);
  if (type == UndefinedType)
    return(MagickFalse);  /* pixels are not uniformly gray */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n o c h r o m e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    is_bilevel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageMonochrome(image) != MagickFalse)
    return(MagickTrue);  /* already typed as bi-level; nothing to do */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  is_bilevel=IdentifyImageMonochrome(image,exception);
  if (is_bilevel == MagickFalse)
    return(MagickFalse);  /* pixels are not strictly black-and-white */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));  /* tag only */
  /*
    Drop embedded ICC/ICM profiles: they would no longer describe the
    transformed pixel values.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);  /* normalize to sRGB first */
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m s R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,const IlluminantType illuminant,double *red,double *green, double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,const IlluminantType illuminant,double *red,double *green, double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 
1.4019995886561440468*(Pr-0.5)); *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 
0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 
0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 
0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 
0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 
0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 
0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 
0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158* EncodePixelGamma(GetPixelGreen(image,q))+0.072186* EncodePixelGamma(GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case Adobe98Colorspace: case CMYColorspace: case DisplayP3Colorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case JzazbzColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case ProPhotoColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { const char *value; double white_luminance; /* Transform image from source colorspace to sRGB. 
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case Adobe98Colorspace: { ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue); break; } case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case DisplayP3Colorspace: { ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case JzazbzColorspace: { ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue); break; } case LabColorspace: { 
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case ProPhotoColorspace: { ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma))-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; 
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); 
SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformsRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. 
*/ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
convolution_7x7_pack1to8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? 
vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* r3 = img0.row<const __fp16>(3); const __fp16* r4 = img0.row<const __fp16>(4); const __fp16* r5 = img0.row<const __fp16>(5); const __fp16* r6 = img0.row<const __fp16>(6); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" // sum0 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r0 "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[2] \n" "fmla v26.8h, v16.8h, v0.h[4] \n" "fmla v27.8h, v16.8h, v0.h[6] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v1.h[2] \n" "fmla v30.8h, v16.8h, v1.h[4] \n" "fmla v31.8h, v16.8h, v1.h[6] \n" "sub %0, %0, #64 \n" "fmla v24.8h, v17.8h, v0.h[1] \n" "fmla v25.8h, v17.8h, v0.h[3] \n" "fmla v26.8h, v17.8h, v0.h[5] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v1.h[3] \n" "fmla v30.8h, v17.8h, v1.h[5] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[2] \n" "fmla v25.8h, v18.8h, v0.h[4] \n" "fmla v26.8h, v18.8h, v0.h[6] \n" "fmla v27.8h, v18.8h, v1.h[0] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v1.h[4] \n" "fmla v30.8h, v18.8h, v1.h[6] \n" "fmla v31.8h, v18.8h, v2.h[0] \n" "fmla v24.8h, v19.8h, v0.h[3] \n" "fmla v25.8h, v19.8h, v0.h[5] \n" "fmla v26.8h, v19.8h, v0.h[7] \n" 
"fmla v27.8h, v19.8h, v1.h[1] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v1.h[5] \n" "fmla v30.8h, v19.8h, v1.h[7] \n" "fmla v31.8h, v19.8h, v2.h[1] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%2] \n" // r1 "fmla v24.8h, v20.8h, v0.h[4] \n" "fmla v25.8h, v20.8h, v0.h[6] \n" "fmla v26.8h, v20.8h, v1.h[0] \n" "fmla v27.8h, v20.8h, v1.h[2] \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v1.h[6] \n" "fmla v30.8h, v20.8h, v2.h[0] \n" "fmla v31.8h, v20.8h, v2.h[2] \n" "fmla v24.8h, v21.8h, v0.h[5] \n" "fmla v25.8h, v21.8h, v0.h[7] \n" "fmla v26.8h, v21.8h, v1.h[1] \n" "fmla v27.8h, v21.8h, v1.h[3] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v1.h[7] \n" "fmla v30.8h, v21.8h, v2.h[1] \n" "fmla v31.8h, v21.8h, v2.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v0.h[6] \n" "fmla v25.8h, v22.8h, v1.h[0] \n" "fmla v26.8h, v22.8h, v1.h[2] \n" "fmla v27.8h, v22.8h, v1.h[4] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[0] \n" "fmla v30.8h, v22.8h, v2.h[2] \n" "fmla v31.8h, v22.8h, v2.h[4] \n" "fmla v24.8h, v23.8h, v4.h[0] \n" "fmla v25.8h, v23.8h, v4.h[2] \n" "fmla v26.8h, v23.8h, v4.h[4] \n" "fmla v27.8h, v23.8h, v4.h[6] \n" "fmla v28.8h, v23.8h, v5.h[0] \n" "fmla v29.8h, v23.8h, v5.h[2] \n" "fmla v30.8h, v23.8h, v5.h[4] \n" "fmla v31.8h, v23.8h, v5.h[6] \n" "fmla v24.8h, v16.8h, v4.h[1] \n" "fmla v25.8h, v16.8h, v4.h[3] \n" "fmla v26.8h, v16.8h, v4.h[5] \n" "fmla v27.8h, v16.8h, v4.h[7] \n" "fmla v28.8h, v16.8h, v5.h[1] \n" "fmla v29.8h, v16.8h, v5.h[3] \n" "fmla v30.8h, v16.8h, v5.h[5] \n" "fmla v31.8h, v16.8h, v5.h[7] \n" "fmla v24.8h, v17.8h, v4.h[2] \n" "fmla v25.8h, v17.8h, v4.h[4] \n" "fmla v26.8h, v17.8h, v4.h[6] \n" "fmla v27.8h, v17.8h, v5.h[0] \n" "fmla v28.8h, v17.8h, v5.h[2] \n" "fmla v29.8h, v17.8h, v5.h[4] \n" "fmla v30.8h, v17.8h, v5.h[6] \n" "fmla v31.8h, v17.8h, v6.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 
{v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v4.h[3] \n" "fmla v25.8h, v18.8h, v4.h[5] \n" "fmla v26.8h, v18.8h, v4.h[7] \n" "fmla v27.8h, v18.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v5.h[3] \n" "fmla v29.8h, v18.8h, v5.h[5] \n" "fmla v30.8h, v18.8h, v5.h[7] \n" "fmla v31.8h, v18.8h, v6.h[1] \n" "fmla v24.8h, v19.8h, v4.h[4] \n" "fmla v25.8h, v19.8h, v4.h[6] \n" "fmla v26.8h, v19.8h, v5.h[0] \n" "fmla v27.8h, v19.8h, v5.h[2] \n" "fmla v28.8h, v19.8h, v5.h[4] \n" "fmla v29.8h, v19.8h, v5.h[6] \n" "fmla v30.8h, v19.8h, v6.h[0] \n" "fmla v31.8h, v19.8h, v6.h[2] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r2 "fmla v24.8h, v20.8h, v4.h[5] \n" "fmla v25.8h, v20.8h, v4.h[7] \n" "fmla v26.8h, v20.8h, v5.h[1] \n" "fmla v27.8h, v20.8h, v5.h[3] \n" "fmla v28.8h, v20.8h, v5.h[5] \n" "fmla v29.8h, v20.8h, v5.h[7] \n" "fmla v30.8h, v20.8h, v6.h[1] \n" "fmla v31.8h, v20.8h, v6.h[3] \n" "fmla v24.8h, v21.8h, v4.h[6] \n" "fmla v25.8h, v21.8h, v5.h[0] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[4] \n" "fmla v28.8h, v21.8h, v5.h[6] \n" "fmla v29.8h, v21.8h, v6.h[0] \n" "fmla v30.8h, v21.8h, v6.h[2] \n" "fmla v31.8h, v21.8h, v6.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v0.h[0] \n" "fmla v25.8h, v22.8h, v0.h[2] \n" "fmla v26.8h, v22.8h, v0.h[4] \n" "fmla v27.8h, v22.8h, v0.h[6] \n" "fmla v28.8h, v22.8h, v1.h[0] \n" "fmla v29.8h, v22.8h, v1.h[2] \n" "fmla v30.8h, v22.8h, v1.h[4] \n" "fmla v31.8h, v22.8h, v1.h[6] \n" "fmla v24.8h, v23.8h, v0.h[1] \n" "fmla v25.8h, v23.8h, v0.h[3] \n" "fmla v26.8h, v23.8h, v0.h[5] \n" "fmla v27.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v23.8h, v1.h[1] \n" "fmla v29.8h, v23.8h, v1.h[3] \n" "fmla v30.8h, v23.8h, v1.h[5] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "fmla v24.8h, v16.8h, v0.h[2] \n" "fmla v25.8h, v16.8h, v0.h[4] \n" "fmla v26.8h, v16.8h, v0.h[6] \n" "fmla v27.8h, v16.8h, v1.h[0] \n" "fmla v28.8h, v16.8h, 
v1.h[2] \n" "fmla v29.8h, v16.8h, v1.h[4] \n" "fmla v30.8h, v16.8h, v1.h[6] \n" "fmla v31.8h, v16.8h, v2.h[0] \n" "fmla v24.8h, v17.8h, v0.h[3] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[7] \n" "fmla v27.8h, v17.8h, v1.h[1] \n" "fmla v28.8h, v17.8h, v1.h[3] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[7] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[4] \n" "fmla v25.8h, v18.8h, v0.h[6] \n" "fmla v26.8h, v18.8h, v1.h[0] \n" "fmla v27.8h, v18.8h, v1.h[2] \n" "fmla v28.8h, v18.8h, v1.h[4] \n" "fmla v29.8h, v18.8h, v1.h[6] \n" "fmla v30.8h, v18.8h, v2.h[0] \n" "fmla v31.8h, v18.8h, v2.h[2] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%4] \n" // r3 "fmla v24.8h, v19.8h, v0.h[5] \n" "fmla v25.8h, v19.8h, v0.h[7] \n" "fmla v26.8h, v19.8h, v1.h[1] \n" "fmla v27.8h, v19.8h, v1.h[3] \n" "fmla v28.8h, v19.8h, v1.h[5] \n" "fmla v29.8h, v19.8h, v1.h[7] \n" "fmla v30.8h, v19.8h, v2.h[1] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "fmla v24.8h, v20.8h, v0.h[6] \n" "fmla v25.8h, v20.8h, v1.h[0] \n" "fmla v26.8h, v20.8h, v1.h[2] \n" "fmla v27.8h, v20.8h, v1.h[4] \n" "fmla v28.8h, v20.8h, v1.h[6] \n" "fmla v29.8h, v20.8h, v2.h[0] \n" "fmla v30.8h, v20.8h, v2.h[2] \n" "fmla v31.8h, v20.8h, v2.h[4] \n" "fmla v24.8h, v21.8h, v4.h[0] \n" "fmla v25.8h, v21.8h, v4.h[2] \n" "fmla v26.8h, v21.8h, v4.h[4] \n" "fmla v27.8h, v21.8h, v4.h[6] \n" "fmla v28.8h, v21.8h, v5.h[0] \n" "fmla v29.8h, v21.8h, v5.h[2] \n" "fmla v30.8h, v21.8h, v5.h[4] \n" "fmla v31.8h, v21.8h, v5.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v4.h[1] \n" "fmla v25.8h, v22.8h, v4.h[3] \n" "fmla v26.8h, v22.8h, v4.h[5] \n" "fmla v27.8h, v22.8h, v4.h[7] \n" "fmla v28.8h, v22.8h, v5.h[1] \n" "fmla v29.8h, v22.8h, v5.h[3] \n" "fmla v30.8h, v22.8h, v5.h[5] \n" "fmla v31.8h, v22.8h, v5.h[7] \n" "fmla 
v24.8h, v23.8h, v4.h[2] \n" "fmla v25.8h, v23.8h, v4.h[4] \n" "fmla v26.8h, v23.8h, v4.h[6] \n" "fmla v27.8h, v23.8h, v5.h[0] \n" "fmla v28.8h, v23.8h, v5.h[2] \n" "fmla v29.8h, v23.8h, v5.h[4] \n" "fmla v30.8h, v23.8h, v5.h[6] \n" "fmla v31.8h, v23.8h, v6.h[0] \n" "fmla v24.8h, v16.8h, v4.h[3] \n" "fmla v25.8h, v16.8h, v4.h[5] \n" "fmla v26.8h, v16.8h, v4.h[7] \n" "fmla v27.8h, v16.8h, v5.h[1] \n" "fmla v28.8h, v16.8h, v5.h[3] \n" "fmla v29.8h, v16.8h, v5.h[5] \n" "fmla v30.8h, v16.8h, v5.h[7] \n" "fmla v31.8h, v16.8h, v6.h[1] \n" "fmla v24.8h, v17.8h, v4.h[4] \n" "fmla v25.8h, v17.8h, v4.h[6] \n" "fmla v26.8h, v17.8h, v5.h[0] \n" "fmla v27.8h, v17.8h, v5.h[2] \n" "fmla v28.8h, v17.8h, v5.h[4] \n" "fmla v29.8h, v17.8h, v5.h[6] \n" "fmla v30.8h, v17.8h, v6.h[0] \n" "fmla v31.8h, v17.8h, v6.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v4.h[5] \n" "fmla v25.8h, v18.8h, v4.h[7] \n" "fmla v26.8h, v18.8h, v5.h[1] \n" "fmla v27.8h, v18.8h, v5.h[3] \n" "fmla v28.8h, v18.8h, v5.h[5] \n" "fmla v29.8h, v18.8h, v5.h[7] \n" "fmla v30.8h, v18.8h, v6.h[1] \n" "fmla v31.8h, v18.8h, v6.h[3] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%5] \n" // r4 "fmla v24.8h, v19.8h, v4.h[6] \n" "fmla v25.8h, v19.8h, v5.h[0] \n" "fmla v26.8h, v19.8h, v5.h[2] \n" "fmla v27.8h, v19.8h, v5.h[4] \n" "fmla v28.8h, v19.8h, v5.h[6] \n" "fmla v29.8h, v19.8h, v6.h[0] \n" "fmla v30.8h, v19.8h, v6.h[2] \n" "fmla v31.8h, v19.8h, v6.h[4] \n" "fmla v24.8h, v20.8h, v0.h[0] \n" "fmla v25.8h, v20.8h, v0.h[2] \n" "fmla v26.8h, v20.8h, v0.h[4] \n" "fmla v27.8h, v20.8h, v0.h[6] \n" "fmla v28.8h, v20.8h, v1.h[0] \n" "fmla v29.8h, v20.8h, v1.h[2] \n" "fmla v30.8h, v20.8h, v1.h[4] \n" "fmla v31.8h, v20.8h, v1.h[6] \n" "fmla v24.8h, v21.8h, v0.h[1] \n" "fmla v25.8h, v21.8h, v0.h[3] \n" "fmla v26.8h, v21.8h, v0.h[5] \n" "fmla v27.8h, v21.8h, v0.h[7] \n" "fmla v28.8h, v21.8h, v1.h[1] \n" "fmla v29.8h, v21.8h, v1.h[3] \n" "fmla 
v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v1.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v0.h[2] \n" "fmla v25.8h, v22.8h, v0.h[4] \n" "fmla v26.8h, v22.8h, v0.h[6] \n" "fmla v27.8h, v22.8h, v1.h[0] \n" "fmla v28.8h, v22.8h, v1.h[2] \n" "fmla v29.8h, v22.8h, v1.h[4] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v22.8h, v2.h[0] \n" "fmla v24.8h, v23.8h, v0.h[3] \n" "fmla v25.8h, v23.8h, v0.h[5] \n" "fmla v26.8h, v23.8h, v0.h[7] \n" "fmla v27.8h, v23.8h, v1.h[1] \n" "fmla v28.8h, v23.8h, v1.h[3] \n" "fmla v29.8h, v23.8h, v1.h[5] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v2.h[1] \n" "prfm pldl1keep, [%6, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%6] \n" // r5 "fmla v24.8h, v16.8h, v0.h[4] \n" "fmla v25.8h, v16.8h, v0.h[6] \n" "fmla v26.8h, v16.8h, v1.h[0] \n" "fmla v27.8h, v16.8h, v1.h[2] \n" "fmla v28.8h, v16.8h, v1.h[4] \n" "fmla v29.8h, v16.8h, v1.h[6] \n" "fmla v30.8h, v16.8h, v2.h[0] \n" "fmla v31.8h, v16.8h, v2.h[2] \n" "fmla v24.8h, v17.8h, v0.h[5] \n" "fmla v25.8h, v17.8h, v0.h[7] \n" "fmla v26.8h, v17.8h, v1.h[1] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[5] \n" "fmla v29.8h, v17.8h, v1.h[7] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v2.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[6] \n" "fmla v25.8h, v18.8h, v1.h[0] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[4] \n" "fmla v28.8h, v18.8h, v1.h[6] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v2.h[0] \n" "fmla v31.8h, v18.8h, v2.h[4] \n" "fmla v24.8h, v19.8h, v4.h[0] \n" "fmla v25.8h, v19.8h, v4.h[2] \n" "fmla v26.8h, v19.8h, v4.h[4] \n" "fmla v27.8h, v19.8h, v4.h[6] \n" "fmla v28.8h, v19.8h, v5.h[0] \n" "fmla v29.8h, v19.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v5.h[4] \n" "fmla v31.8h, v19.8h, v5.h[6] \n" "fmla v24.8h, v20.8h, v4.h[1] \n" "fmla v25.8h, v20.8h, 
v4.h[3] \n" "fmla v26.8h, v20.8h, v4.h[5] \n" "fmla v27.8h, v20.8h, v4.h[7] \n" "fmla v28.8h, v20.8h, v5.h[1] \n" "fmla v29.8h, v20.8h, v5.h[3] \n" "fmla v30.8h, v20.8h, v5.h[5] \n" "fmla v31.8h, v20.8h, v5.h[7] \n" "fmla v24.8h, v21.8h, v4.h[2] \n" "fmla v25.8h, v21.8h, v4.h[4] \n" "fmla v26.8h, v21.8h, v4.h[6] \n" "fmla v27.8h, v21.8h, v5.h[0] \n" "fmla v28.8h, v21.8h, v5.h[2] \n" "fmla v29.8h, v21.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v6.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v4.h[3] \n" "fmla v25.8h, v22.8h, v4.h[5] \n" "fmla v26.8h, v22.8h, v4.h[7] \n" "fmla v27.8h, v22.8h, v5.h[1] \n" "fmla v28.8h, v22.8h, v5.h[3] \n" "fmla v29.8h, v22.8h, v5.h[5] \n" "fmla v30.8h, v22.8h, v5.h[7] \n" "fmla v31.8h, v22.8h, v6.h[1] \n" "fmla v24.8h, v23.8h, v4.h[4] \n" "fmla v25.8h, v23.8h, v4.h[6] \n" "fmla v26.8h, v23.8h, v5.h[0] \n" "fmla v27.8h, v23.8h, v5.h[2] \n" "fmla v28.8h, v23.8h, v5.h[4] \n" "fmla v29.8h, v23.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v6.h[0] \n" "fmla v31.8h, v23.8h, v6.h[2] \n" "prfm pldl1keep, [%7, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%7] \n" // r6 "fmla v24.8h, v16.8h, v4.h[5] \n" "fmla v25.8h, v16.8h, v4.h[7] \n" "fmla v26.8h, v16.8h, v5.h[1] \n" "fmla v27.8h, v16.8h, v5.h[3] \n" "fmla v28.8h, v16.8h, v5.h[5] \n" "fmla v29.8h, v16.8h, v5.h[7] \n" "fmla v30.8h, v16.8h, v6.h[1] \n" "fmla v31.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v17.8h, v4.h[6] \n" "fmla v25.8h, v17.8h, v5.h[0] \n" "fmla v26.8h, v17.8h, v5.h[2] \n" "fmla v27.8h, v17.8h, v5.h[4] \n" "fmla v28.8h, v17.8h, v5.h[6] \n" "fmla v29.8h, v17.8h, v6.h[0] \n" "fmla v30.8h, v17.8h, v6.h[2] \n" "fmla v31.8h, v17.8h, v6.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[0] \n" "fmla v25.8h, v18.8h, v0.h[2] \n" "fmla v26.8h, v18.8h, v0.h[4] \n" "fmla v27.8h, v18.8h, v0.h[6] \n" "fmla v28.8h, v18.8h, v1.h[0] \n" "fmla 
v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v18.8h, v1.h[4] \n" "fmla v31.8h, v18.8h, v1.h[6] \n" "fmla v24.8h, v19.8h, v0.h[1] \n" "fmla v25.8h, v19.8h, v0.h[3] \n" "fmla v26.8h, v19.8h, v0.h[5] \n" "fmla v27.8h, v19.8h, v0.h[7] \n" "fmla v28.8h, v19.8h, v1.h[1] \n" "fmla v29.8h, v19.8h, v1.h[3] \n" "fmla v30.8h, v19.8h, v1.h[5] \n" "fmla v31.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v0.h[2] \n" "fmla v25.8h, v20.8h, v0.h[4] \n" "fmla v26.8h, v20.8h, v0.h[6] \n" "fmla v27.8h, v20.8h, v1.h[0] \n" "fmla v28.8h, v20.8h, v1.h[2] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v20.8h, v1.h[6] \n" "fmla v31.8h, v20.8h, v2.h[0] \n" "add %1, %1, #32 \n" "fmla v24.8h, v21.8h, v0.h[3] \n" "fmla v25.8h, v21.8h, v0.h[5] \n" "fmla v26.8h, v21.8h, v0.h[7] \n" "fmla v27.8h, v21.8h, v1.h[1] \n" "add %2, %2, #32 \n" "fmla v28.8h, v21.8h, v1.h[3] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v21.8h, v1.h[7] \n" "fmla v31.8h, v21.8h, v2.h[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v16.8h}, [%8] \n" "fmla v24.8h, v22.8h, v0.h[4] \n" "fmla v25.8h, v22.8h, v0.h[6] \n" "fmla v26.8h, v22.8h, v1.h[0] \n" "fmla v27.8h, v22.8h, v1.h[2] \n" "add %3, %3, #32 \n" "fmla v28.8h, v22.8h, v1.h[4] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, v22.8h, v2.h[0] \n" "fmla v31.8h, v22.8h, v2.h[2] \n" "add %4, %4, #32 \n" "fmla v24.8h, v23.8h, v0.h[5] \n" "fmla v25.8h, v23.8h, v0.h[7] \n" "fmla v26.8h, v23.8h, v1.h[1] \n" "fmla v27.8h, v23.8h, v1.h[3] \n" "add %5, %5, #32 \n" "fmla v28.8h, v23.8h, v1.h[5] \n" "fmla v29.8h, v23.8h, v1.h[7] \n" "fmla v30.8h, v23.8h, v2.h[1] \n" "fmla v31.8h, v23.8h, v2.h[3] \n" "add %6, %6, #32 \n" "fmla v24.8h, v16.8h, v0.h[6] \n" "fmla v25.8h, v16.8h, v1.h[0] \n" "fmla v26.8h, v16.8h, v1.h[2] \n" "fmla v27.8h, v16.8h, v1.h[4] \n" "add %7, %7, #32 \n" "fmla v28.8h, v16.8h, v1.h[6] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v2.h[2] \n" "fmla v31.8h, v16.8h, v2.h[4] \n" "sub %8, %8, #768 \n" // kptr -= 48 * 8; "st1 {v24.8h, v25.8h, 
v26.8h, v27.8h}, [%0], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(r5), // %6 "=r"(r6), // %7 "=r"(kptr) // %8 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(r5), "7"(r6), "8"(kptr) : "memory", "v0", "v1", "v2", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.8h, v1.8h}, [%1] \n" // r0 "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v0.h[2] \n" "fmla v30.8h, v16.8h, v0.h[4] \n" "fmla v31.8h, v16.8h, v0.h[6] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v0.h[3] \n" "fmla v30.8h, v17.8h, v0.h[5] \n" "fmla v31.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v0.h[4] \n" "fmla v30.8h, v18.8h, v0.h[6] \n" "fmla v31.8h, v18.8h, v1.h[0] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v0.h[5] \n" "fmla v30.8h, v19.8h, v0.h[7] \n" "fmla v31.8h, v19.8h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.8h, v3.8h}, [%2] \n" // r1 "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v0.h[6] \n" "fmla v30.8h, v20.8h, v1.h[0] \n" "fmla v31.8h, v20.8h, v1.h[2] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v0.h[7] \n" "fmla v30.8h, v21.8h, v1.h[1] \n" "fmla v31.8h, v21.8h, v1.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[0] \n" "fmla v30.8h, v22.8h, v1.h[2] \n" "fmla v31.8h, v22.8h, v1.h[4] \n" "fmla v28.8h, v23.8h, v2.h[0] \n" 
"fmla v29.8h, v23.8h, v2.h[2] \n" "fmla v30.8h, v23.8h, v2.h[4] \n" "fmla v31.8h, v23.8h, v2.h[6] \n" "fmla v28.8h, v16.8h, v2.h[1] \n" "fmla v29.8h, v16.8h, v2.h[3] \n" "fmla v30.8h, v16.8h, v2.h[5] \n" "fmla v31.8h, v16.8h, v2.h[7] \n" "fmla v28.8h, v17.8h, v2.h[2] \n" "fmla v29.8h, v17.8h, v2.h[4] \n" "fmla v30.8h, v17.8h, v2.h[6] \n" "fmla v31.8h, v17.8h, v3.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v2.h[3] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[7] \n" "fmla v31.8h, v18.8h, v3.h[1] \n" "fmla v28.8h, v19.8h, v2.h[4] \n" "fmla v29.8h, v19.8h, v2.h[6] \n" "fmla v30.8h, v19.8h, v3.h[0] \n" "fmla v31.8h, v19.8h, v3.h[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.8h, v1.8h}, [%3] \n" // r2 "fmla v28.8h, v20.8h, v2.h[5] \n" "fmla v29.8h, v20.8h, v2.h[7] \n" "fmla v30.8h, v20.8h, v3.h[1] \n" "fmla v31.8h, v20.8h, v3.h[3] \n" "fmla v28.8h, v21.8h, v2.h[6] \n" "fmla v29.8h, v21.8h, v3.h[0] \n" "fmla v30.8h, v21.8h, v3.h[2] \n" "fmla v31.8h, v21.8h, v3.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v0.h[0] \n" "fmla v29.8h, v22.8h, v0.h[2] \n" "fmla v30.8h, v22.8h, v0.h[4] \n" "fmla v31.8h, v22.8h, v0.h[6] \n" "fmla v28.8h, v23.8h, v0.h[1] \n" "fmla v29.8h, v23.8h, v0.h[3] \n" "fmla v30.8h, v23.8h, v0.h[5] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v16.8h, v0.h[2] \n" "fmla v29.8h, v16.8h, v0.h[4] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v1.h[0] \n" "fmla v28.8h, v17.8h, v0.h[3] \n" "fmla v29.8h, v17.8h, v0.h[5] \n" "fmla v30.8h, v17.8h, v0.h[7] \n" "fmla v31.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[4] \n" "fmla v29.8h, v18.8h, v0.h[6] \n" "fmla v30.8h, v18.8h, v1.h[0] \n" "fmla v31.8h, v18.8h, v1.h[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.8h, v3.8h}, [%4] \n" // r3 "fmla 
v28.8h, v19.8h, v0.h[5] \n" "fmla v29.8h, v19.8h, v0.h[7] \n" "fmla v30.8h, v19.8h, v1.h[1] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "fmla v28.8h, v20.8h, v0.h[6] \n" "fmla v29.8h, v20.8h, v1.h[0] \n" "fmla v30.8h, v20.8h, v1.h[2] \n" "fmla v31.8h, v20.8h, v1.h[4] \n" "fmla v28.8h, v21.8h, v2.h[0] \n" "fmla v29.8h, v21.8h, v2.h[2] \n" "fmla v30.8h, v21.8h, v2.h[4] \n" "fmla v31.8h, v21.8h, v2.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v2.h[1] \n" "fmla v29.8h, v22.8h, v2.h[3] \n" "fmla v30.8h, v22.8h, v2.h[5] \n" "fmla v31.8h, v22.8h, v2.h[7] \n" "fmla v28.8h, v23.8h, v2.h[2] \n" "fmla v29.8h, v23.8h, v2.h[4] \n" "fmla v30.8h, v23.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v3.h[0] \n" "fmla v28.8h, v16.8h, v2.h[3] \n" "fmla v29.8h, v16.8h, v2.h[5] \n" "fmla v30.8h, v16.8h, v2.h[7] \n" "fmla v31.8h, v16.8h, v3.h[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.8h, v1.8h}, [%5] \n" // r4 "fmla v28.8h, v17.8h, v2.h[4] \n" "fmla v29.8h, v17.8h, v2.h[6] \n" "fmla v30.8h, v17.8h, v3.h[0] \n" "fmla v31.8h, v17.8h, v3.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v2.h[5] \n" "fmla v29.8h, v18.8h, v2.h[7] \n" "fmla v30.8h, v18.8h, v3.h[1] \n" "fmla v31.8h, v18.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v2.h[6] \n" "fmla v29.8h, v19.8h, v3.h[0] \n" "fmla v30.8h, v19.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[4] \n" "fmla v28.8h, v20.8h, v0.h[0] \n" "fmla v29.8h, v20.8h, v0.h[2] \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v31.8h, v20.8h, v0.h[6] \n" "fmla v28.8h, v21.8h, v0.h[1] \n" "fmla v29.8h, v21.8h, v0.h[3] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v0.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v0.h[2] \n" "fmla v29.8h, v22.8h, v0.h[4] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v22.8h, v1.h[0] \n" "fmla v28.8h, v23.8h, v0.h[3] \n" "fmla v29.8h, 
v23.8h, v0.h[5] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v1.h[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.8h, v3.8h}, [%6] \n" // r5 "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[6] \n" "fmla v30.8h, v16.8h, v1.h[0] \n" "fmla v31.8h, v16.8h, v1.h[2] \n" "fmla v28.8h, v17.8h, v0.h[5] \n" "fmla v29.8h, v17.8h, v0.h[7] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v1.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[6] \n" "fmla v29.8h, v18.8h, v1.h[0] \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v18.8h, v1.h[4] \n" "fmla v28.8h, v19.8h, v2.h[0] \n" "fmla v29.8h, v19.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v2.h[4] \n" "fmla v31.8h, v19.8h, v2.h[6] \n" "fmla v28.8h, v20.8h, v2.h[1] \n" "fmla v29.8h, v20.8h, v2.h[3] \n" "fmla v30.8h, v20.8h, v2.h[5] \n" "fmla v31.8h, v20.8h, v2.h[7] \n" "fmla v28.8h, v21.8h, v2.h[2] \n" "fmla v29.8h, v21.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v2.h[6] \n" "fmla v31.8h, v21.8h, v3.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v2.h[3] \n" "fmla v29.8h, v22.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[7] \n" "fmla v31.8h, v22.8h, v3.h[1] \n" "add %1, %1, #16 \n" "fmla v28.8h, v23.8h, v2.h[4] \n" "fmla v29.8h, v23.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v3.h[0] \n" "fmla v31.8h, v23.8h, v3.h[2] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v0.8h, v1.8h}, [%7] \n" // r6 "fmla v28.8h, v16.8h, v2.h[5] \n" "fmla v29.8h, v16.8h, v2.h[7] \n" "fmla v30.8h, v16.8h, v3.h[1] \n" "fmla v31.8h, v16.8h, v3.h[3] \n" "add %2, %2, #16 \n" "fmla v28.8h, v17.8h, v2.h[6] \n" "fmla v29.8h, v17.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v3.h[2] \n" "fmla v31.8h, v17.8h, v3.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[0] \n" "fmla v29.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v18.8h, v0.h[4] \n" "fmla v31.8h, 
v18.8h, v0.h[6] \n" "add %3, %3, #16 \n" "fmla v28.8h, v19.8h, v0.h[1] \n" "fmla v29.8h, v19.8h, v0.h[3] \n" "fmla v30.8h, v19.8h, v0.h[5] \n" "fmla v31.8h, v19.8h, v0.h[7] \n" "add %4, %4, #16 \n" "fmla v28.8h, v20.8h, v0.h[2] \n" "fmla v29.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v20.8h, v0.h[6] \n" "fmla v31.8h, v20.8h, v1.h[0] \n" "add %5, %5, #16 \n" "fmla v28.8h, v21.8h, v0.h[3] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v21.8h, v0.h[7] \n" "fmla v31.8h, v21.8h, v1.h[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v16.8h}, [%8] \n" "fmla v28.8h, v22.8h, v0.h[4] \n" "fmla v29.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v22.8h, v1.h[0] \n" "fmla v31.8h, v22.8h, v1.h[2] \n" "add %6, %6, #16 \n" "fmla v28.8h, v23.8h, v0.h[5] \n" "fmla v29.8h, v23.8h, v0.h[7] \n" "fmla v30.8h, v23.8h, v1.h[1] \n" "fmla v31.8h, v23.8h, v1.h[3] \n" "add %7, %7, #16 \n" "fmla v28.8h, v16.8h, v0.h[6] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v16.8h, v1.h[2] \n" "fmla v31.8h, v16.8h, v1.h[4] \n" "sub %8, %8, #768 \n" // kptr -= 48 * 8; "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(r5), // %6 "=r"(r6), // %7 "=r"(kptr) // %8 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(r5), "7"(r6), "8"(kptr) : "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1] \n" // r0 "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v31.8h}, [%0] \n" // sum0 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmul v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v1.8h}, [%2] \n" // r1 "fmla v28.8h, 
v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v1.h[0] \n" "fmla v28.8h, v16.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v1.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v1.h[4] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3] \n" // r2 "fmla v28.8h, v20.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v1.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v0.h[0] \n" "fmla v31.8h, v23.8h, v0.h[1] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v1.8h}, [%4] \n" // r3 "fmla v28.8h, v16.8h, v0.h[2] \n" "fmla v29.8h, v17.8h, v0.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v0.h[4] \n" "fmla v31.8h, v19.8h, v0.h[5] \n" "add %1, %1, #4 \n" "fmla v28.8h, v20.8h, v0.h[6] \n" "fmla v29.8h, v21.8h, v1.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v1.h[1] \n" "fmla v31.8h, v23.8h, v1.h[2] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.8h}, [%5] \n" // r4 "fmla v28.8h, v16.8h, v1.h[3] \n" "fmla v29.8h, v17.8h, v1.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v1.h[5] \n" "fmla v31.8h, v19.8h, v1.h[6] \n" "add %2, %2, #4 \n" "fmla v28.8h, v20.8h, v0.h[0] \n" "fmla v29.8h, v21.8h, v0.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v0.h[2] \n" "fmla v31.8h, v23.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v1.8h}, [%6] \n" // r5 "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v17.8h, v0.h[5] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v0.h[6] 
\n" "fmla v31.8h, v19.8h, v1.h[0] \n" "add %3, %3, #4 \n" "fmla v28.8h, v20.8h, v1.h[1] \n" "fmla v29.8h, v21.8h, v1.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v1.h[3] \n" "fmla v31.8h, v23.8h, v1.h[4] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v0.8h}, [%7] \n" // r6 "fmla v28.8h, v16.8h, v1.h[5] \n" "fmla v29.8h, v17.8h, v1.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v0.h[0] \n" "fmla v31.8h, v19.8h, v0.h[1] \n" "add %4, %4, #4 \n" "fmla v28.8h, v20.8h, v0.h[2] \n" "fmla v29.8h, v21.8h, v0.h[3] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v16.8h}, [%8] \n" "fmla v30.8h, v22.8h, v0.h[4] \n" "fmla v31.8h, v23.8h, v0.h[5] \n" "add %5, %5, #4 \n" "fmla v28.8h, v16.8h, v0.h[6] \n" "add %6, %6, #4 \n" "fadd v29.8h, v29.8h, v30.8h \n" "fadd v31.8h, v31.8h, v28.8h \n" "add %7, %7, #4 \n" "fadd v29.8h, v29.8h, v31.8h \n" "sub %8, %8, #768 \n" // kptr -= 48 * 8; "st1 {v29.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(r5), // %6 "=r"(r6), // %7 "=r"(kptr) // %8 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(r5), "7"(r6), "8"(kptr) : "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } }
core.c
/* Main solver routines for heat equation solver */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <mpi.h> #include "heat.h" /* Exchange the boundary values */ void exchange(field *temperature, parallel_data *parallel) { MPI_Request reqs[4]; // Send to the up, receive from down MPI_Isend(temperature->data[1], temperature->ny + 2, MPI_DOUBLE, parallel->nup, 11, MPI_COMM_WORLD, &reqs[0]); MPI_Irecv(temperature->data[temperature->nx + 1], temperature->ny + 2, MPI_DOUBLE, parallel->ndown, 11, MPI_COMM_WORLD, &reqs[1]); // Send to the down, receive from up MPI_Isend(temperature->data[temperature->nx], temperature->ny + 2, MPI_DOUBLE, parallel->ndown, 12, MPI_COMM_WORLD, &reqs[2]); MPI_Irecv(temperature->data[0], temperature->ny + 2, MPI_DOUBLE, parallel->nup, 12, MPI_COMM_WORLD, &reqs[3]); MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE); } /* Update the temperature values using five-point stencil */ void evolve(field *curr, field *prev, double a, double dt) { int i, j; double dx2, dy2; /* Determine the temperature field at next time step * As we have fixed boundary conditions, the outermost gridpoints * are not updated. */ dx2 = prev->dx * prev->dx; dy2 = prev->dy * prev->dy; #pragma omp parallel for private(i, j) for (i = 1; i < curr->nx + 1; i++) { for (j = 1; j < curr->ny + 1; j++) { curr->data[i][j] = prev->data[i][j] + a * dt * ((prev->data[i + 1][j] - 2.0 * prev->data[i][j] + prev->data[i - 1][j]) / dx2 + (prev->data[i][j + 1] - 2.0 * prev->data[i][j] + prev->data[i][j - 1]) / dy2); } } }
loop.c
#include <stdio.h>

/* Number of vector elements. */
#define N 100

/*
 * Demo of the OpenMP 5.0 `loop` construct: computes the SAXPY-style
 * update y = a*x + y inside a parallel region and verifies the result
 * through the last element.
 *
 * Returns 0 on success; prints a diagnostic to stderr and returns 1 if
 * the verification fails (the original version printed to stdout with
 * no newline and still exited 0 on error).
 */
int main() {
    float x[N], y[N];
    float a = 2.0;

    /* initialize: x[i] = i, y[i] = 0 */
    for (int i = 0; i < N; i++) {
        x[i] = i;
        y[i] = 0;
    }

    #pragma omp parallel
    {
        /* bind(parallel) binds the loop to the team of the enclosing
           parallel region. */
        #pragma omp loop bind(parallel)
        for (int i = 0; i < N; ++i)
            y[i] = a * x[i] + y[i];
    }

    /* 2.0 and small integers are exactly representable in float, so an
       exact comparison is safe for this particular check. */
    if (y[N - 1] != (N - 1) * 2.0) {
        fprintf(stderr, "Error: 2*(N-1) != y[N-1]=%f\n", y[N - 1]);
        return 1;
    }
    return 0;
}
GB_unaryop__ainv_int64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int64_int32
// op(A') function:  GB_tran__ainv_int64_int32

// C type:   int64_t
// A type:   int32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = -aij   (additive inverse, AINV)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise: Cx [p] = (int64_t) -Ax [p] for p = 0..anz-1, parallelized
// with a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop__ainv_int64_int32
(
    int64_t *Cx,            // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel lives in GB_unaryop_transpose.c, which is
// textually included here and specialized by the macros defined above.
GrB_Info GB_tran__ainv_int64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
5cfe06b37d3f6d74b44e1f86074c47ce092d5c2b.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "omp.h" struct dataobj { void *restrict data; int * size; int * npsize; int * dsize; int * hsize; int * hofs; int * oofs; } ; struct profiler { double section0; double section1; double section2; double section3; } ; int ForwardTTI(struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict phi_vec, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int x_size, const int y_M, const int y_m, const int y_size, const int z_M, const int z_m, const int z_size, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data; float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]]) delta_vec->data; float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data; float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]]) phi_vec->data; float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data; float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float 
(*)[rec_coords_vec->size[1]]) rec_coords_vec->data; float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data; float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data; float (*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[theta_vec->size[1]][theta_vec->size[2]]) theta_vec->data; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data; float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data; float (*r184)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void**)&r184, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r184[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) float (*r185)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void**)&r185, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r185[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) float (*r186)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void**)&r186, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r186[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) float (*r187)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void**)&r187, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r187[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) float (*r188)[y_size + 3 + 3][z_size 
+ 3 + 3]; posix_memalign((void**)&r188, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r188[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) float (*r236)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void**)&r236, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r236[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) float (*r237)[y_size + 3 + 3][z_size + 3 + 3]; posix_memalign((void**)&r237, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3])); #pragma omp target enter data map(alloc: r237[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) #pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]]) #pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) #pragma omp target enter data map(to: v[0:v_vec->size[0]][0:v_vec->size[1]][0:v_vec->size[2]][0:v_vec->size[3]]) #pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]]) #pragma omp target enter data map(to: delta[0:delta_vec->size[0]][0:delta_vec->size[1]][0:delta_vec->size[2]]) #pragma omp target enter data map(to: epsilon[0:epsilon_vec->size[0]][0:epsilon_vec->size[1]][0:epsilon_vec->size[2]]) #pragma omp target enter data map(to: phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]]) #pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]]) #pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]]) #pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]]) #pragma omp target enter data map(to: theta[0:theta_vec->size[0]][0:theta_vec->size[1]][0:theta_vec->size[2]]) #pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]]) struct timeval start_section0, end_section0; 
gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp target teams distribute parallel for collapse(3) for (int x = x_m - 3; x <= x_M + 3; x += 1) { for (int y = y_m - 3; y <= y_M + 3; y += 1) { for (int z = z_m - 3; z <= z_M + 3; z += 1) { r184[x + 3][y + 3][z + 3] = sqrt(2*delta[x + 2][y + 2][z + 2] + 1); r185[x + 3][y + 3][z + 3] = cos(theta[x + 2][y + 2][z + 2]); r186[x + 3][y + 3][z + 3] = sin(phi[x + 2][y + 2][z + 2]); r187[x + 3][y + 3][z + 3] = sin(theta[x + 2][y + 2][z + 2]); r188[x + 3][y + 3][z + 3] = cos(phi[x + 2][y + 2][z + 2]); } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000; for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3)) { struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ #pragma omp target teams distribute parallel for collapse(3) for (int x = x_m - 3; x <= x_M + 3; x += 1) { for (int y = y_m - 3; y <= y_M + 3; y += 1) { for (int z = z_m - 3; z <= z_M + 3; z += 1) { float r267 = 8.33333346e-4F*(-v[t0][x + 12][y + 12][z + 9] + v[t0][x + 12][y + 12][z + 15]) + 7.50000011e-3F*(v[t0][x + 12][y + 12][z + 10] - v[t0][x + 12][y + 12][z + 14]) + 3.75000006e-2F*(-v[t0][x + 12][y + 12][z + 11] + v[t0][x + 12][y + 12][z + 13]); float r268 = 8.33333346e-4F*(-v[t0][x + 12][y + 9][z + 12] + v[t0][x + 12][y + 15][z + 12]) + 7.50000011e-3F*(v[t0][x + 12][y + 10][z + 12] - v[t0][x + 12][y + 14][z + 12]) + 3.75000006e-2F*(-v[t0][x + 12][y + 11][z + 12] + v[t0][x + 12][y + 13][z + 12]); float r269 = 8.33333346e-4F*(-v[t0][x + 9][y + 12][z + 12] + v[t0][x + 15][y + 12][z + 12]) + 7.50000011e-3F*(v[t0][x + 10][y + 12][z + 12] - v[t0][x + 14][y + 12][z + 12]) + 3.75000006e-2F*(-v[t0][x + 11][y + 12][z + 12] + v[t0][x + 13][y + 12][z + 
12]); float r270 = 8.33333346e-4F*(-u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15]) + 7.50000011e-3F*(u[t0][x + 12][y + 12][z + 10] - u[t0][x + 12][y + 12][z + 14]) + 3.75000006e-2F*(-u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13]); float r271 = 8.33333346e-4F*(-u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 15][z + 12]) + 7.50000011e-3F*(u[t0][x + 12][y + 10][z + 12] - u[t0][x + 12][y + 14][z + 12]) + 3.75000006e-2F*(-u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 13][z + 12]); float r272 = 8.33333346e-4F*(-u[t0][x + 9][y + 12][z + 12] + u[t0][x + 15][y + 12][z + 12]) + 7.50000011e-3F*(u[t0][x + 10][y + 12][z + 12] - u[t0][x + 14][y + 12][z + 12]) + 3.75000006e-2F*(-u[t0][x + 11][y + 12][z + 12] + u[t0][x + 13][y + 12][z + 12]); r236[x + 3][y + 3][z + 3] = -(r270*r185[x + 3][y + 3][z + 3] + r271*r186[x + 3][y + 3][z + 3]*r187[x + 3][y + 3][z + 3] + r272*r187[x + 3][y + 3][z + 3]*r188[x + 3][y + 3][z + 3]); r237[x + 3][y + 3][z + 3] = -(r267*r185[x + 3][y + 3][z + 3] + r268*r186[x + 3][y + 3][z + 3]*r187[x + 3][y + 3][z + 3] + r269*r187[x + 3][y + 3][z + 3]*r188[x + 3][y + 3][z + 3]); } } } #pragma omp target teams distribute parallel for collapse(3) for (int x = x_m; x <= x_M; x += 1) { for (int y = y_m; y <= y_M; y += 1) { for (int z = z_m; z <= z_M; z += 1) { float r250 = dt*dt; float r249 = dt*damp[x + 1][y + 1][z + 1]; float r248 = 7.50000011e-3F*(r185[x + 3][y + 3][z + 1]*r236[x + 3][y + 3][z + 1] - r185[x + 3][y + 3][z + 5]*r236[x + 3][y + 3][z + 5] + r186[x + 3][y + 1][z + 3]*r187[x + 3][y + 1][z + 3]*r236[x + 3][y + 1][z + 3] - r186[x + 3][y + 5][z + 3]*r187[x + 3][y + 5][z + 3]*r236[x + 3][y + 5][z + 3] + r187[x + 1][y + 3][z + 3]*r188[x + 1][y + 3][z + 3]*r236[x + 1][y + 3][z + 3] - r187[x + 5][y + 3][z + 3]*r188[x + 5][y + 3][z + 3]*r236[x + 5][y + 3][z + 3]); float r247 = 8.33333346e-4F*(-r185[x + 3][y + 3][z]*r236[x + 3][y + 3][z] + r185[x + 3][y + 3][z + 6]*r236[x + 3][y + 3][z + 6] - r186[x + 3][y][z + 3]*r187[x + 
3][y][z + 3]*r236[x + 3][y][z + 3] + r186[x + 3][y + 6][z + 3]*r187[x + 3][y + 6][z + 3]*r236[x + 3][y + 6][z + 3] - r187[x][y + 3][z + 3]*r188[x][y + 3][z + 3]*r236[x][y + 3][z + 3] + r187[x + 6][y + 3][z + 3]*r188[x + 6][y + 3][z + 3]*r236[x + 6][y + 3][z + 3]); float r246 = 3.75000006e-2F*(-r185[x + 3][y + 3][z + 2]*r236[x + 3][y + 3][z + 2] + r185[x + 3][y + 3][z + 4]*r236[x + 3][y + 3][z + 4] - r186[x + 3][y + 2][z + 3]*r187[x + 3][y + 2][z + 3]*r236[x + 3][y + 2][z + 3] + r186[x + 3][y + 4][z + 3]*r187[x + 3][y + 4][z + 3]*r236[x + 3][y + 4][z + 3] - r187[x + 2][y + 3][z + 3]*r188[x + 2][y + 3][z + 3]*r236[x + 2][y + 3][z + 3] + r187[x + 4][y + 3][z + 3]*r188[x + 4][y + 3][z + 3]*r236[x + 4][y + 3][z + 3]); float r245 = 1.0/(vp[x + 2][y + 2][z + 2]*vp[x + 2][y + 2][z + 2]); float r244 = 1.0/(vp[x + 2][y + 2][z + 2]*vp[x + 2][y + 2][z + 2]); float r238 = 2.0F*r244 + r249; float r239 = -2.0F*r244 + r249; float r240 = -1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 
11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]; float r241 = r239*u[t2][x + 12][y + 12][z + 12] + 4.0F*r245*u[t0][x + 12][y + 12][z + 12]; float r242 = r239*v[t2][x + 12][y + 12][z + 12] + 4.0F*r245*v[t0][x + 12][y + 12][z + 12]; float r243 = 8.33333346e-4F*(r185[x + 3][y + 3][z]*r237[x + 3][y + 3][z] - r185[x + 3][y + 3][z + 6]*r237[x + 3][y + 3][z + 6] + r186[x + 3][y][z + 3]*r187[x + 3][y][z + 3]*r237[x + 3][y][z + 3] - r186[x + 3][y + 6][z + 3]*r187[x + 3][y + 6][z + 3]*r237[x + 3][y + 6][z + 3] + r187[x][y + 3][z + 3]*r188[x][y + 3][z + 3]*r237[x][y + 3][z + 3] - r187[x + 6][y + 3][z + 3]*r188[x + 6][y + 3][z + 3]*r237[x + 6][y + 3][z + 3]) + 7.50000011e-3F*(-r185[x + 3][y + 3][z + 1]*r237[x + 3][y + 3][z + 1] + r185[x + 3][y + 3][z + 5]*r237[x + 3][y + 3][z + 5] - r186[x + 3][y + 1][z + 3]*r187[x + 3][y + 1][z + 3]*r237[x + 3][y + 1][z + 3] + r186[x + 3][y + 5][z + 3]*r187[x + 3][y + 5][z + 3]*r237[x + 3][y + 5][z + 3] - r187[x + 1][y + 3][z + 3]*r188[x + 1][y + 3][z + 3]*r237[x + 1][y + 3][z + 3] + r187[x + 5][y + 3][z + 3]*r188[x + 5][y + 3][z + 3]*r237[x + 5][y + 3][z + 3]) + 3.75000006e-2F*(r185[x + 3][y + 3][z + 2]*r237[x + 3][y + 3][z + 2] - r185[x + 3][y + 3][z + 4]*r237[x + 3][y + 3][z + 4] + r186[x + 3][y + 2][z + 3]*r187[x + 3][y + 2][z + 3]*r237[x + 3][y + 2][z + 3] - r186[x + 3][y + 4][z + 3]*r187[x + 3][y + 4][z + 3]*r237[x + 3][y + 4][z + 3] + r187[x + 2][y + 3][z + 3]*r188[x + 2][y + 3][z + 3]*r237[x + 2][y + 3][z + 3] - r187[x + 4][y + 3][z + 3]*r188[x + 4][y + 3][z + 3]*r237[x + 4][y + 3][z + 3]); u[t1][x + 12][y + 12][z + 12] = 1.0F*(r241 + 2.0F*r250*(r243*r184[x + 3][y + 3][z + 3] + (2*epsilon[x + 2][y + 2][z + 2] + 1)*(r240 + r246 + r247 + r248)))/r238; v[t1][x + 12][y + 12][z + 12] = 1.0F*(r242 + 2.0F*r250*(r243 + (r240 + r246 + r247 + r248)*r184[x + 3][y + 
3][z + 3]))/r238; } } } /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000; struct timeval start_section2, end_section2; gettimeofday(&start_section2, NULL); /* Begin section2 */ #pragma omp target teams distribute parallel for collapse(1) for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1) { int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])); int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])); int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])); int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1; int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1; int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1; float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]); float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]); float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]); if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1) { float r251 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_2 + 2]*vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_2 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r251; } if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1) { float r252 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_3 + 2]*vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_3 + 2])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 
2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r252; } if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r253 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_2 + 2]*vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_2 + 2])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r253; } if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r254 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_3 + 2]*vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_3 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r254; } if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r255 = (dt*dt)*(vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_2 + 2]*vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_2 + 2])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r255; } if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r256 = (dt*dt)*(vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_3 + 2]*vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_3 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r256; } if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r257 = (dt*dt)*(vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_2 + 
2]*vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_2 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r257; } if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r258 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_3 + 2]*vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_3 + 2])*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r258; } ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])); ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])); ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])); ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1; ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1; ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1; px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]); py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]); pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]); if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1) { float r259 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_2 + 2]*vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_2 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r259; } if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1) { float r260 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_1 + 
2][ii_src_3 + 2]*vp[ii_src_0 + 2][ii_src_1 + 2][ii_src_3 + 2])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r260; } if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r261 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_2 + 2]*vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_2 + 2])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r261; } if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r262 = (dt*dt)*(vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_3 + 2]*vp[ii_src_0 + 2][ii_src_4 + 2][ii_src_3 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r262; } if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r263 = (dt*dt)*(vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_2 + 2]*vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_2 + 2])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r263; } if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r264 = (dt*dt)*(vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_3 + 2]*vp[ii_src_5 + 2][ii_src_1 + 2][ii_src_3 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r264; } if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 
&& ii_src_5 <= x_M + 1) { float r265 = (dt*dt)*(vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_2 + 2]*vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_2 + 2])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r265; } if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r266 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_3 + 2]*vp[ii_src_5 + 2][ii_src_4 + 2][ii_src_3 + 2])*src[time][p_src]; #pragma omp atomic update v[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r266; } } /* End section2 */ gettimeofday(&end_section2, NULL); timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000; struct timeval start_section3, end_section3; gettimeofday(&start_section3, NULL); /* Begin section3 */ #pragma omp target teams distribute parallel for collapse(1) for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1) { int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])); int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])); int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])); int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1; int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1; int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1; float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]); float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]); float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]); float sum = 0.0F; if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M 
+ 1 && ii_rec_2 <= z_M + 1) { sum += (u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12] + v[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1); } if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1) { sum += (u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12] + v[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz); } if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1) { sum += (u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12] + v[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py); } if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1) { sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*(u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12] + v[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12]); } if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1) { sum += (u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12] + v[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px); } if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1) { sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*(u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12] + v[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12]); } if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1) { sum += (-1.25e-4F*px*py*pz + 
2.5e-3F*px*py)*(u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12] + v[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12]); } if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1) { sum += 1.25e-4F*px*py*pz*(u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12] + v[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12]); } rec[time][p_rec] = sum; } /* End section3 */ gettimeofday(&end_section3, NULL); timers->section3 += (double)(end_section3.tv_sec-start_section3.tv_sec)+(double)(end_section3.tv_usec-start_section3.tv_usec)/1000000; } #pragma omp target exit data map(delete: r184[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r184); #pragma omp target exit data map(delete: r185[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r185); #pragma omp target exit data map(delete: r186[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r186); #pragma omp target exit data map(delete: r187[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r187); #pragma omp target exit data map(delete: r188[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r188); #pragma omp target exit data map(delete: r236[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r236); #pragma omp target exit data map(delete: r237[0:x_size + 3 + 3][0:y_size + 3 + 3][0:z_size + 3 + 3]) free(r237); #pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]]) #pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]]) #pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) #pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) #pragma omp target update from(v[0:v_vec->size[0]][0:v_vec->size[1]][0:v_vec->size[2]][0:v_vec->size[3]]) #pragma omp target exit data map(release: 
v[0:v_vec->size[0]][0:v_vec->size[1]][0:v_vec->size[2]][0:v_vec->size[3]]) #pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]]) #pragma omp target exit data map(delete: delta[0:delta_vec->size[0]][0:delta_vec->size[1]][0:delta_vec->size[2]]) #pragma omp target exit data map(delete: epsilon[0:epsilon_vec->size[0]][0:epsilon_vec->size[1]][0:epsilon_vec->size[2]]) #pragma omp target exit data map(delete: phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]]) #pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]]) #pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]]) #pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]]) #pragma omp target exit data map(delete: theta[0:theta_vec->size[0]][0:theta_vec->size[1]][0:theta_vec->size[2]]) #pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]]) return 0; }
dispatch.c
/* * Copyright (c) 2013 Mark Heily <mark@heily.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <pthread.h> #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ /* * EXPERIMENTAL dispatching API */ void kq_dispatch(kqueue_t kq, void (*cb)(kqueue_t, struct kevent)) { const int maxevents = 64; /* Should be more like 2xNCPU */ struct kevent events[maxevents]; ssize_t nevents; int i; for (;;) { nevents = kq_event(kq, NULL, 0, (struct kevent *) &events, maxevents, NULL); if (nevents < 0) abort(); #pragma omp parallel { for (i = 0; i < nevents; i++) { #pragma omp single nowait (*cb)(kq, events[i]); } } } }
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Written in 2012 by Samuel Neves <sneves@dei.uc.pt>

   To the extent possible under law, the author(s) have dedicated all copyright
   and related and neighboring rights to this software to the public domain
   worldwide. This software is distributed without any warranty.

   You should have received a copy of the CC0 Public Domain Dedication along with
   this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

/* Number of parallel leaf lanes in the BLAKE2bp tree (fixed by the spec). */
#define PARALLELISM_DEGREE 4

/* Initialize one leaf state of the 2-level BLAKE2bp tree.
 * `offset` is the leaf index (0..PARALLELISM_DEGREE-1), stored in the
 * node_offset parameter so each lane hashes with a distinct parameter block.
 * Leaves output BLAKE2B_OUTBYTES regardless of the final digest length. */
static int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;                        /* 2-level tree: leaves + one root */
  store32(&P->leaf_length, 0);
  store64(&P->node_offset, offset);    /* distinguishes the four lanes */
  P->node_depth = 0;                   /* leaf level */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  S->outlen = P->inner_length;         /* leaves always emit full inner hashes */
  return 0;
}

/* Initialize the root state of the tree (node_depth 1, node_offset 0).
 * The root's output length is the caller-requested digest length. */
static int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32(&P->leaf_length, 0);
  store64(&P->node_offset, 0);
  P->node_depth = 1;                   /* root level */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  S->outlen = P->digest_length;
  return 0;
}

/* Initialize an unkeyed BLAKE2bp state: one root plus four leaves.
 * Returns 0 on success, -1 on invalid outlen. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, ( uint8_t ) outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, 0, i ) < 0 ) return -1;

  /* Mark the root and the last leaf as final nodes of their level. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = ( uint8_t ) outlen;
  return 0;
}

/* Keyed initialization: like blake2bp_init, then every leaf absorbs one
 * zero-padded key block, per the BLAKE2 keying convention.
 * Returns 0 on success, -1 on invalid outlen/key/keylen. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = ( uint8_t ) outlen;
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes of input. Data is striped across the four leaves in
 * BLAKE2B_BLOCKBYTES-sized blocks (leaf i gets blocks i, i+4, i+8, ...).
 * Incomplete stripes are buffered in S->buf until a full
 * PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES stripe accumulates.
 * Parallelized over leaves with OpenMP when available. Returns 0. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, size_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* Complete a previously buffered partial stripe first. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

  /* Each lane (OpenMP thread or sequential iteration) consumes its own
   * block out of every full stripe; all lanes see the same `in`/`inlen`. */
#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
  #pragma omp parallel shared(S)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = ( size_t ) omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Buffer whatever is left of the final, incomplete stripe. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = ( uint32_t ) left + ( uint32_t ) inlen;
  return 0;
}

/* Finalize: flush each leaf's share of the buffered tail, finalize the four
 * leaf digests, feed them into the root, and emit the root digest.
 * Returns -1 if `outlen` does not match the length set at init time. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  if(S->outlen != outlen) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot BLAKE2bp over `inlen` bytes of `in`, with optional key.
 * Equivalent to init(_key) + update + final, but the per-leaf tail handling
 * is done inline in each lane. Returns 0 on success, -1 on bad parameters. */
int blake2bp( uint8_t *out, const void *in, const void *key, size_t outlen, size_t inlen, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if ( NULL == key && keylen > 0) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

  /* Stripe the input across the four leaves; each lane also absorbs its
   * share of the final partial stripe and finalizes its own digest. */
#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
  #pragma omp parallel shared(S,hash)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = ( size_t ) omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}
vednnMaxPoolingBackward.c
#include "vednnMaxPoolingBackward.h"
#include "vednn-def.h"
#include <stdint.h>

/* Wrapper that runs a max-pooling-backward implementation `pFunc`, splitting
 * the batch dimension across OpenMP threads when OpenMP is enabled and more
 * than one thread is configured; otherwise it calls pFunc directly.
 *
 * NOTE(review): the parameter names used below (pParamGradOut, pDataGradOut,
 * pParamOut, pDataOut, pParamIn, pDataIn, pParamGradIn, pDataGradIn,
 * pParamPool) come from the VEDNN_MAXPOOLINGBKW_ARGS macro declared in the
 * header — confirm against vednnMaxPoolingBackward.h.
 *
 * The per-thread data offsets assume densely packed NCHW float tensors
 * (batch * channel * height * width) — presumably guaranteed by the vednn
 * tensor layout; verify against vednn-def.h. */
static inline vednnError_t vednnMaxPoolingBackward_wrapper(
    vednnMaxPoolBackward_t pFunc,
    VEDNN_MAXPOOLINGBKW_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc(VEDNN_MAXPOOLINGBKW_ARGS_LIST);
#else
  if ( __vednn_omp_num_threads == 1 ) {
    /* Single-thread configuration: no splitting overhead. */
    return pFunc(VEDNN_MAXPOOLINGBKW_ARGS_LIST);
  }
  else {
    vednnError_t rc = VEDNN_SUCCESS ;
    /* OR-reduce the per-thread return codes so any failure survives. */
#pragma omp parallel reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;

      int64_t allBatch = pParamGradOut->batch ;

      /* Split `allBatch` as evenly as possible: the first `remain` threads
       * take one extra batch element. */
      int64_t nBatch  = allBatch / nthreads ;
      int64_t remain  = allBatch % nthreads ;

      int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myBatch    = nBatch + ( threadid < remain ? 1 : 0 ) ;

      if( myBatch == 0 ) {
        /* More threads than batches: this thread has nothing to do. */
        rc |= VEDNN_SUCCESS ;
      }
      else  {
        /* Clone the tensor descriptors with the per-thread batch count;
         * the pooling parameters are shared read-only. */
        vednnTensorParam_t _pParamGradOut = *pParamGradOut ; _pParamGradOut.batch = myBatch ;
        vednnTensorParam_t _pParamOut     = *pParamOut     ; _pParamOut.batch     = myBatch ;
        vednnTensorParam_t _pParamIn      = *pParamIn      ; _pParamIn.batch      = myBatch ;
        vednnTensorParam_t _pParamGradIn  = *pParamGradIn  ; _pParamGradIn.batch  = myBatch ;

        /* Advance each data pointer to this thread's first batch element. */
        float* _pDataGradOut = ((float *)pDataGradOut) + batchBegin * pParamGradOut->channel * pParamGradOut->height * pParamGradOut->width ;
        float* _pDataOut     = ((float *)pDataOut)     + batchBegin * pParamOut->channel     * pParamOut->height     * pParamOut->width ;
        float* _pDataIn      = ((float *)pDataIn)      + batchBegin * pParamIn->channel      * pParamIn->height      * pParamIn->width ;
        float* _pDataGradIn  = ((float *)pDataGradIn)  + batchBegin * pParamGradIn->channel  * pParamGradIn->height  * pParamGradIn->width ;

        rc |= pFunc(&_pParamGradOut, (void*) _pDataGradOut,
                    &_pParamOut,     (void*) _pDataOut,
                    &_pParamIn,      (void*) _pDataIn,
                    &_pParamGradIn,  (void*) _pDataGradIn,
                    pParamPool ) ;
      }
    }
    return rc ;
  }
#endif
}

/* ----------------------------------------------------------------------- */
/* Public entry point: picks the fastest specialized kernel whose
 * preconditions hold, else the generic fallback, and runs it through the
 * batch-splitting wrapper above. */
vednnError_t vednnMaxPoolingBackward( VEDNN_MAXPOOLINGBKW_ARGS )
{
#define OMPWRAP( IMPL ) WRAP_RET(vednnMaxPoolingBackward_##IMPL, \
    vednnMaxPoolingBackward_wrapper, VEDNN_MAXPOOLINGBKW_ARGS_LIST)
  /* "regular" kernels require: no padding, stride == window (non-overlapping
   * pooling), and the output footprint fitting the input extents. */
  if( pParamPool->padHeight == 0 && pParamPool->padWidth == 0
      && pParamPool->strideHeight == pParamPool->windowHeight
      && pParamPool->strideWidth == pParamPool->windowWidth
      && pParamOut->height*pParamPool->strideHeight <= pParamIn->height
      && pParamOut->width*pParamPool->strideWidth == pParamIn->width )
  {
    if( pParamOut->width <= 128 )
    {
      /* Even window width plus 8-byte-aligned input/grad-input buffers
       * enables the packed-pairs variant. */
      if( (pParamPool->windowWidth & 0x01) == 0
          && (((uint64_t)pDataIn) & 0x07) == 0
          && (((uint64_t)pDataGradIn) & 0x07) == 0 )
        OMPWRAP(regular_ww2X_owU128_ialigned);
      else
        OMPWRAP(regular_owU128);
    }
    else
      OMPWRAP(regular);
  }
  else
    OMPWRAP(default);
}
// vim: et sw=2 ts=2
single-modificado-master.c
#include <stdio.h>
#include <omp.h>

/* OpenMP teaching demo for the `single` and `master` constructs.
 *
 * One thread reads an initialization value `a` inside a `single` region;
 * the implicit barrier at the end of `single` guarantees every thread
 * sees `a` before the worksharing loop fills b[].
 *
 * Returns 0 on normal termination (value of `a` falls back to 0 when the
 * input cannot be parsed).
 */
int main()
{
  /* BUG FIX: `a` was declared uninitialized and scanf's return value was
   * never checked — on malformed input the program read an indeterminate
   * value (undefined behavior).  `a` now starts at 0 and the read is
   * validated. */
  int n = 9, i, a = 0, b[n];

  for (i = 0; i < n; i++)
    b[i] = -1;

  #pragma omp parallel
  {
    #pragma omp single
    {
      printf("Dentro de la región parallel:\n");
    }

    #pragma omp single
    {
      printf("Introduce valor de inicialización a:");
      if (scanf("%d", &a) != 1) {
        a = 0;  /* parse failure: keep the deterministic default */
      }
      printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
    }
    /* Implicit barrier after `single`: `a` is set before the loop runs. */

    #pragma omp for
    for (i = 0; i < n; i++)
      b[i] = a;
  }

  /* Outside a parallel region `master` binds to no team; the initial
   * thread simply executes the block. */
  #pragma omp master
  {
    for (i = 0; i < n; i++)
      printf("b[%d] = %d\t", i, b[i]);
    printf("\n");
    printf("Master ejecutada por el thread %d\n", omp_get_thread_num());
  }

  printf("Después de la región parallel:\n");
  printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
  for (i = 0; i < n; i++)
    printf("b[%d] = %d\t", i, b[i]);
  printf("\n");

  return 0;
}
mscash2_fmt_plug.c
/* MSCASH2 patch for John the Ripper written by S3nf in 2010, 2011
 * a slow but working version
 *
 * Cracking Domain Cached Credentials for modern Windows operating systems, supporting:
 *     - Windows Vista
 *     - Windows 7
 *     - Windows Server 2008
 *
 * This software was written by S3nf in 2010, 2011. No copyright is claimed, and the software is hereby placed in
 * the public domain. In case this attempt to disclaim copyright and place the software in the public domain
 * is deemed null and void, then the software is Copyright (c) 2010, 2011 S3nf and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted.
 *
 * Modified for optional utf-8 support by magnum 2011, same terms as above
 *
 * Code redone/optimized by JimF June 2011. (2x to 10x improvement in speed)
 *  - Code converted to oSSL (for non-sse builds). The inline MD4/SHA1 replaced. This reduced
 *    about 900 lines down to 60 or so, which were much easier to follow. This was a preliminary
 *    step to getting SSE2 added. Once done, this ended up faster than the original, so the new
 *    simplified code was kept.
 *  - Setup of ipad/opad only done once per PW/Salt, about 10-15% speedup
 *  - 1/2 of the encryption performed within inner loop was moved outside of inner loop (nearly doubles speed)
 *  - changed signature from M$salt#hash to $DCC2$iterations#salt#hash
 *  - variable iterations now 'possible'. Default is 10240
 *  - increased salt (user name) up to 22 UC2 characters. Bug in original code only allowed up to 8 chars.
 *  - Added SSE2(/MMX) and SSE2i to the deep inner loop. 2x to 4x speedup.
 *  - total about 2x to 10x improvement in speed (depending upon CPU and compiler). Some compilers
 *    were more efficient with original code, and thus received less of a performance boost. Others
 *    got a significant improvement.
 *  - The utf8 code was greatly simplified.
There was no reason to try to optimized the UTF code as * the format is so slow that utf8 conversion is a non-issue. Thus we always call the enc_to_utf16() * at the proper locations, and let that function deal with being in --encoding=utf8 switch mode or not. * - Fixed code to properly work with BE systems, and alignment required systems. * - Made some 'interface' changes to the SSE2i for SHA1, and to the sha-mmx.S code, to make it work * properly, and to make it more efficient. We deal with 2 SHA1 states, and alternate back and forth * between them. The changes to the SSE2i code, were to optimize this dual state, and the changes * to the .S code were simply to make it work at all and the same optimizations were placed there. * - the OMP code was removed during initial re-write, and was properly re-incorporated by magnum. * * In June 2013, salt length (Username) increased from 22 to 128, and max password length increased * from 27 to 125 bytes (unicode bytes, so 250 ?) * * This module is based on: * - the MSCASH patch for john written by Alain Espinosa <alainesp at gmail.com> in 2007 * - RFC 1320 - The MD4 Message-Digest Algorithm * - RFC 2104 - HMAC: Keyed-Hashing for Message Authentication * - RFC 3174 - US Secure Hash Algorithm 1 (SHA1) * - the HMAC-SHA1 implementation of the PolarSSL open source cryptographic library (http://polarssl.org/) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_mscash2; #elif FMT_REGISTERS_H john_register_one(&fmt_mscash2); #else #include <string.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "unicode.h" #include "options.h" #include "unicode.h" #include "sha.h" #include "md4.h" #include "simd-intrinsics.h" #include "loader.h" #include "mscash_common.h" #if defined (_OPENMP) #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // Tuned on Corei7 Quad-HT #endif #endif #include "memdbg.h" #define ITERATIONS 10240 static unsigned iteration_cnt = (ITERATIONS); /* this will get 
changed at runtime, salt loading */ #define FORMAT_LABEL "mscash2" #define FORMAT_NAME "MS Cache Hash 2 (DCC2)" #define MAX_SALT_LEN 128 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE (MAX_SALT_LEN*2+4) #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #ifdef SIMD_COEF_32 #define MS_NUM_KEYS (SIMD_COEF_32*SIMD_PARA_SHA1) // Ok, now we have our MMX/SSE2/intr buffer. // this version works properly for MMX, SSE2 (.S) and SSE2 intrinsic. #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion static unsigned char (*sse_hash1); static unsigned char (*sse_crypt1); static unsigned char (*sse_crypt2); #else #define MS_NUM_KEYS 1 #endif #define MIN_KEYS_PER_CRYPT MS_NUM_KEYS #define MAX_KEYS_PER_CRYPT MS_NUM_KEYS #define HASH_LEN (16+48) static unsigned char *salt_buffer; static unsigned int salt_len; static unsigned char(*key); static unsigned int new_key = 1; static unsigned char(*md4hash); // allows the md4 of user, and salt to be appended to it. 
the md4 is ntlm, with the salt is DCC1 static unsigned int (*crypt_out); static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); if (omp_t < 1) omp_t = 1; self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif key = mem_calloc(self->params.max_keys_per_crypt, (PLAINTEXT_LENGTH + 1)); md4hash = mem_calloc(self->params.max_keys_per_crypt, HASH_LEN); crypt_out = mem_calloc(self->params.max_keys_per_crypt, BINARY_SIZE); #if defined (SIMD_COEF_32) sse_hash1 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*sse_hash1)*SHA_BUF_SIZ*4, MEM_ALIGN_SIMD); sse_crypt1 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*sse_crypt1) * 20, MEM_ALIGN_SIMD); sse_crypt2 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*sse_crypt2) * 20, MEM_ALIGN_SIMD); { int index; for (index = 0; index < self->params.max_keys_per_crypt; ++index) { // set the length of all hash1 SSE buffer to 64+20 * 8 bits // The 64 is for the ipad/opad, the 20 is for the length of the SHA1 buffer that also gets into each crypt // this works for SSEi ((unsigned int *)sse_hash1)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (84<<3); // all encrypts are 64+20 bytes. sse_hash1[GETPOS(20,index)] = 0x80; } } // From this point on, we ONLY touch the first 20 bytes (* SIMD_COEF_32) of each buffer 'block'. If !SHA_PARA', then only the first // block is written to after this, if there are more that one SHA_PARA, then the start of each para block will be updated inside the inner loop. 
#endif mscash2_adjust_tests(options.target_enc, PLAINTEXT_LENGTH, MAX_SALT_LEN); } static void done(void) { #ifdef SIMD_COEF_32 MEM_FREE(sse_crypt2); MEM_FREE(sse_crypt1); MEM_FREE(sse_hash1); #endif MEM_FREE(crypt_out); MEM_FREE(md4hash); MEM_FREE(key); } static int valid(char *ciphertext, struct fmt_main *self) { return mscash2_common_valid(ciphertext, MAX_SALT_LEN, self); } static void set_salt(void *salt) { UTF16 *p = (UTF16*)salt; salt_len = *p++; iteration_cnt = *p++; salt_buffer = (unsigned char*)p; } static void *get_salt(char *ciphertext) { static UTF16 out[130+1]; unsigned char input[MAX_SALT_LEN*3+1]; int i, iterations, utf16len; char *lasth = strrchr(ciphertext, '#'); memset(out, 0, sizeof(out)); sscanf(&ciphertext[6], "%d", &iterations); ciphertext = strchr(ciphertext, '#') + 1; for (i = 0; &ciphertext[i] < lasth; i++) input[i] = (unsigned char)ciphertext[i]; input[i] = 0; utf16len = enc_to_utf16(&out[2], MAX_SALT_LEN, input, i); if (utf16len < 0) utf16len = strlen16(&out[2]); out[0] = utf16len << 1; out[1] = iterations; return out; } static void *get_binary(char *ciphertext) { static unsigned int out[BINARY_SIZE / sizeof(unsigned int)]; unsigned int i; unsigned int temp; /* We need to allow salt containing '#' so we search backwards */ ciphertext = strrchr(ciphertext, '#') + 1; for (i = 0; i < 4 ;i++) { #if ARCH_LITTLE_ENDIAN temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 0])])) << 4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 1])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 2])])) << 12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 3])])) << 8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 4])])) << 20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 5])])) << 16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 6])])) << 28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 7])])) << 24; #else temp = ((unsigned 
int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 6])])) << 4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 7])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 4])])) << 12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 5])])) << 8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 2])])) << 20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 3])])) << 16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 0])])) << 28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 1])])) << 24; #endif out[i] = temp; } #ifdef SIMD_COEF_32 alter_endianity(out, BINARY_SIZE); #endif return out; } static int binary_hash_0(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_0; } static int binary_hash_1(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_1; } static int binary_hash_2(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_2; } static int binary_hash_3(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_3; } static int binary_hash_4(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_4; } static int binary_hash_5(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_5; } static int binary_hash_6(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_6; } static int get_hash_0(int index) { return crypt_out[4 * index + 3] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[4 * index + 3] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[4 * index + 3] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[4 * index + 3] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[4 * index + 3] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[4 * index + 3] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[4 * index + 3] & PH_MASK_6; } static int cmp_all(void *binary, int count) { unsigned int i = 0; unsigned int d = ((unsigned int *)binary)[3]; for (; i < 
count; i++) if (d == crypt_out[i * 4 + 3]) return 1; return 0; } static int cmp_one(void * binary, int index) { unsigned int *t = (unsigned int *)binary; unsigned int a = crypt_out[4 * index + 0]; unsigned int b = crypt_out[4 * index + 1]; unsigned int c = crypt_out[4 * index + 2]; unsigned int d = crypt_out[4 * index + 3]; if (d != t[3]) return 0; if (c != t[2]) return 0; if (b != t[1]) return 0; return (a == t[0]); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *_key, int index) { strnzcpy ((char*)&key[index*(PLAINTEXT_LENGTH + 1)], _key, (PLAINTEXT_LENGTH + 1)); new_key = 1; } static char *get_key(int index) { return (char*)&key[index*(PLAINTEXT_LENGTH + 1)]; } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *n = salt, i; unsigned char *s = (unsigned char*)n; unsigned int hash = 5381; for (i = 0; i < (*n+2); ++i) hash = ((hash<<5)+hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } #ifdef SIMD_COEF_32 // NOTE, in the end, this block will move above the pbkdf2() function, and the #else and #endif wrapping that function will be // uncommented. Thus, if built for SSE2 (mmx, or intrisic), we get this function. Otherwise we get the pbkdf2() function which // uses OpenSSL. However to get the 'layout' right, The code here will walk through the array buffer, calling the pbkdf2 // function. static void pbkdf2_sse2(int t) { // Thread safe, t is our thread number. // All indexes into buffers are offset by (t * MS_NUM_KEYS * (size)) SHA_CTX ctx1, ctx2; unsigned int ipad[SHA_LBLOCK], opad[SHA_LBLOCK]; unsigned int tmp_hash[SHA_DIGEST_LENGTH/4]; unsigned int i, j, k, *i1, *i2, *o1, *t_crypt; unsigned char *t_sse_crypt1, *t_sse_crypt2, *t_sse_hash1; memset(&ipad[4], 0x36, SHA_CBLOCK-16); memset(&opad[4], 0x5C, SHA_CBLOCK-16); // All pointers get their offset for this thread here. No further offsetting below. 
t_crypt = &crypt_out[t * MS_NUM_KEYS * 4]; t_sse_crypt1 = &sse_crypt1[t * MS_NUM_KEYS * 20]; t_sse_crypt2 = &sse_crypt2[t * MS_NUM_KEYS * 20]; t_sse_hash1 = &sse_hash1[t * MS_NUM_KEYS * SHA_BUF_SIZ * 4]; i1 = (unsigned int*)t_sse_crypt1; i2 = (unsigned int*)t_sse_crypt2; o1 = (unsigned int*)t_sse_hash1; for (k = 0; k < MS_NUM_KEYS; ++k) { for (i = 0;i < 4;i++) { ipad[i] = t_crypt[k*4+i]^0x36363636; opad[i] = t_crypt[k*4+i]^0x5C5C5C5C; } SHA1_Init(&ctx1); SHA1_Init(&ctx2); SHA1_Update(&ctx1,ipad,SHA_CBLOCK); SHA1_Update(&ctx2,opad,SHA_CBLOCK); // we memcopy from flat into SIMD_COEF_32 output buffer's (our 'temp' ctx buffer). // This data will NOT need to be BE swapped (it already IS BE swapped). i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))] = ctx1.h0; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = ctx1.h1; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = ctx1.h2; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = ctx1.h3; i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx1.h4; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))] = ctx2.h0; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = ctx2.h1; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = ctx2.h2; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = ctx2.h3; i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx2.h4; SHA1_Update(&ctx1,salt_buffer,salt_len); SHA1_Update(&ctx1,"\x0\x0\x0\x1",4); SHA1_Final((unsigned char*)tmp_hash,&ctx1); SHA1_Update(&ctx2,(unsigned char*)tmp_hash,SHA_DIGEST_LENGTH); SHA1_Final((unsigned char*)tmp_hash,&ctx2); // now convert this from flat into SIMD_COEF_32 buffers. // Also, perform the 'first' ^= into the crypt buffer. NOTE, we are doing that in BE format // so we will need to 'undo' that in the end. 
o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))] = t_crypt[k*4+0] = ctx2.h0; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = t_crypt[k*4+1] = ctx2.h1; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = t_crypt[k*4+2] = ctx2.h2; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = t_crypt[k*4+3] = ctx2.h3; o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx2.h4; } for (i = 1; i < iteration_cnt; i++) { SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt1, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt2, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); // only xor first 16 bytes, since that is ALL this format uses for (k = 0; k < MS_NUM_KEYS; k++) { unsigned *p = &((unsigned int*)t_sse_hash1)[k/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32 + (k&(SIMD_COEF_32-1))]; for (j = 0; j < 4; j++) t_crypt[k*4+j] ^= p[(j*SIMD_COEF_32)]; } } } #else /* * This function is derived from IEEE Std 802.11-2004, Clause H.4. * The main construction is from PKCS#5 v2.0. It is tweaked a little * to remove some code not needed for our SHA1-128 output. */ static void pbkdf2(unsigned int _key[]) // key is also 'final' digest. 
{ SHA_CTX ctx1, ctx2, tmp_ctx1, tmp_ctx2; unsigned char ipad[SHA_CBLOCK], opad[SHA_CBLOCK]; unsigned int tmp_hash[SHA_DIGEST_LENGTH/4]; unsigned i, j; unsigned char *key = (unsigned char*)_key; for (i = 0; i < 16; i++) { ipad[i] = key[i]^0x36; opad[i] = key[i]^0x5C; } memset(&ipad[16], 0x36, sizeof(ipad)-16); memset(&opad[16], 0x5C, sizeof(opad)-16); SHA1_Init(&ctx1); SHA1_Init(&ctx2); SHA1_Update(&ctx1, ipad, SHA_CBLOCK); SHA1_Update(&ctx2, opad, SHA_CBLOCK); memcpy(&tmp_ctx1, &ctx1, sizeof(SHA_CTX)); memcpy(&tmp_ctx2, &ctx2, sizeof(SHA_CTX)); SHA1_Update(&ctx1, salt_buffer, salt_len); SHA1_Update(&ctx1, "\x0\x0\x0\x1", 4); SHA1_Final((unsigned char*)tmp_hash,&ctx1); SHA1_Update(&ctx2, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH); // we have to sha1 final to a 'temp' buffer, since we can only overwrite first 16 bytes // of the _key buffer. If we overwrote 20 bytes, then we would lose the first 4 bytes // of the next element (and overwrite end of buffer on last element). SHA1_Final((unsigned char*)tmp_hash, &ctx2); // only copy first 16 bytes, since that is ALL this format uses memcpy(_key, tmp_hash, 16); for (i = 1; i < iteration_cnt; i++) { // we only need to copy the accumulator data from the CTX, since // the original encryption was a full block of 64 bytes. memcpy(&ctx1, &tmp_ctx1, sizeof(SHA_CTX)-(64+sizeof(unsigned int))); SHA1_Update(&ctx1, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH); SHA1_Final((unsigned char*)tmp_hash, &ctx1); memcpy(&ctx2, &tmp_ctx2, sizeof(SHA_CTX)-(64+sizeof(unsigned int))); SHA1_Update(&ctx2, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH); SHA1_Final((unsigned char*)tmp_hash, &ctx2); // only xor first 16 bytes, since that is ALL this format uses for (j = 0; j < 4; j++) _key[j] ^= tmp_hash[j]; } } #endif static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int i, t, t1; // Note, for a format like DCC2, there is little reason to optimize anything other // than the pbkdf2 inner loop. 
The one exception to that, is the NTLM can be done // and known when to be done, only when the // now get NTLM of the password (MD4 of unicode) if (new_key) { #if MS_NUM_KEYS > 1 && defined(_OPENMP) #pragma omp parallel for default(none) private(i) shared(count, key, md4hash) #endif for (i = 0; i < count; ++i) { int utf16len; UTF16 pass_unicode[PLAINTEXT_LENGTH+1]; MD4_CTX ctx; utf16len = enc_to_utf16(pass_unicode, PLAINTEXT_LENGTH, &key[(PLAINTEXT_LENGTH + 1)*i], strlen((char*)&key[(PLAINTEXT_LENGTH + 1)*i])); if (utf16len <= 0) { key[(PLAINTEXT_LENGTH + 1)*i-utf16len] = 0; if (utf16len != 0) utf16len = strlen16(pass_unicode); } MD4_Init(&ctx); MD4_Update(&ctx, pass_unicode, utf16len<<1); MD4_Final(&md4hash[HASH_LEN*i], &ctx); } new_key = 0; } #ifdef _OPENMP #pragma omp parallel for default(none) private(t) shared(count, salt_buffer, salt_len, crypt_out, md4hash) #endif for (t1 = 0; t1 < count; t1 += MS_NUM_KEYS) { MD4_CTX ctx; int i; t = t1 / MS_NUM_KEYS; for (i = 0; i < MS_NUM_KEYS; ++i) { // Get DCC1. That is MD4( NTLM . unicode(lc username) ) MD4_Init(&ctx); MD4_Update(&ctx, &md4hash[(t * MS_NUM_KEYS + i) * HASH_LEN], 16); MD4_Update(&ctx, salt_buffer, salt_len); MD4_Final((unsigned char*)&crypt_out[(t * MS_NUM_KEYS + i) * 4], &ctx); // now we have DCC1 (mscash) which is MD4 (MD4(unicode(pass)) . 
unicode(lc username)) #ifndef SIMD_COEF_32 // Non-SSE: Compute DCC2 one at a time pbkdf2(&crypt_out[(t * MS_NUM_KEYS + i) * 4]); #endif } #ifdef SIMD_COEF_32 // SSE: Compute DCC2 in parallel, once per thread pbkdf2_sse2(t); #endif } return count; } struct fmt_main fmt_mscash2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG2 }, mscash2_common_tests }, { init, done, fmt_default_reset, mscash2_common_prepare, valid, mscash2_common_split, get_binary, get_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_binop__minus_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__minus_int16 // A.*B function (eWiseMult): GB_AemultB__minus_int16 // A*D function (colscale): GB_AxD__minus_int16 // D*A function (rowscale): GB_DxB__minus_int16 // C+=B function (dense accum): GB_Cdense_accumB__minus_int16 // C+=b function (dense accum): GB_Cdense_accumb__minus_int16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_int16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_int16 // C=scalar+B GB_bind1st__minus_int16 // C=scalar+B' GB_bind1st_tran__minus_int16 // C=A+scalar GB_bind2nd__minus_int16 // C=A'+scalar GB_bind2nd_tran__minus_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x - y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__minus_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__minus_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__minus_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__minus_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__minus_int16 ( GrB_Matrix 
C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__minus_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__minus_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
// (tail of the eWiseAdd worker GB_AaddB__minus_int16 — its start precedes this
// chunk; the slice pointers are unused here but expected by the template)
    *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply worker for z = x - y on int16 operands.  The actual
// loop body lives in GB_emult_template.c, which expands using the GB_* macros
// defined earlier in this (auto-generated) file.

GrB_Info GB_AemultB__minus_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-task slice arrays: unused by this worker, referenced by the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x - Bx [p] for all entries p present in B (Bb is the bitmap of B,
// or NULL if B is not bitmap; GBB tests entry presence).

GrB_Info GB_bind1st__minus_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] - y for all entries p present in A.

GrB_Info GB_bind2nd__minus_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (x - aij) ;           \
}

GrB_Info GB_bind1st_tran__minus_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition for this file
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij - y) ;           \
}

GrB_Info GB_bind2nd_tran__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kernel.h
// SneakySnake pre-alignment filter, offloaded with OpenMP target.
// For each read (one per loop iteration / device thread) it compares the
// packed 2-bit-per-base read against the reference along the main diagonal
// and +/- F_ErrorThreshold shifted diagonals, counting leading matching bases
// via count-leading-zeros of the XOR.  Ftest_Results[tid] is set to 1
// (candidate passes) or 0 (filtered out).
// NOTE(review): assumes NBytes == 8 (indexing uses tid*8 + i) and a sequence
// length of 200 bases — TODO confirm against the defining header.
// lsl/lsr/__clz/set_bit are helpers defined elsewhere — presumably lsl/lsr
// return 0 for shift counts >= 32; verify, since "32 - ShiftValue" is 32 when
// ShiftValue == 0.
void sneaky_snake(
    const int nTeams,
    const int nThreads,
    const uint*__restrict F_ReadSeq,
    const uint*__restrict F_RefSeq,
    int*__restrict Ftest_Results,
    const int NumReads,
    const int F_ErrorThreshold)
{
  #pragma omp target teams distribute parallel for num_teams(nTeams) thread_limit(nThreads)
  for (int tid = 0;tid < NumReads; tid++)
  {
    // stage this read's packed words into thread-private storage
    uint ReadsPerThread[NBytes];
    uint RefsPerThread[NBytes];

    #pragma unroll
    for (int i = 0; i < NBytes; i++)
    {
      ReadsPerThread[i] = F_ReadSeq[tid*8 + i];
      RefsPerThread[i] = F_RefSeq[tid*8 + i];
    }

    /////////////////////////////////////////////////////////////////////////////
    Ftest_Results[tid] = 1;             // optimistic: accept until proven bad

    uint ReadCompTmp = 0;
    uint RefCompTmp = 0;
    uint DiagonalResult = 0;

    uint ReadTmp1 = 0;
    uint ReadTmp2 = 0;
    uint RefTmp1 = 0;
    uint RefTmp2 = 0;

    uint CornerCase = 0;

    int localCounter= 0;                // leading zeros for current diagonal
    int localCounterMax=0;              // best over all diagonals this step
    int globalCounter = 0;              // bases consumed so far (of 200)
    int Max_leading_zeros = 0;
    int AccumulatedErrs = 0;
    int Diagonal = 0;
    int ShiftValue = 0;                 // bit offset within the current word

    int j = 0; //specifying the j-th int that we are reading in each read-ref comparison (can be from 0 to 7)

    while ( (j < 7) && (globalCounter < 200))
    {
      Diagonal = 0;

      // main diagonal: align read and ref at the current bit offset
      RefTmp1 = lsl(RefsPerThread[j], ShiftValue);
      RefTmp2 = lsr(RefsPerThread[j + 1], 32 - ShiftValue);
      ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue);
      ReadTmp2 = lsr(ReadsPerThread[j + 1], 32 - ShiftValue);

      ReadCompTmp = ReadTmp1 | ReadTmp2;
      RefCompTmp = RefTmp1 | RefTmp2;

      DiagonalResult = ReadCompTmp ^ RefCompTmp;

      localCounterMax = __clz(DiagonalResult);

      //////////////////// Upper diagonals /////////////////////
      // shift the read right by 2*e bits (e bases) relative to the reference
      for(int e = 1; e <= F_ErrorThreshold; e++)
      {
        Diagonal += 1;
        CornerCase = 0;
        if ( (j == 0) && ( (ShiftValue - (2*e)) < 0 ) )
        {
          // shifting past the start of the read: mask the missing leading
          // bases by forcing their bits to mismatch (set in CornerCase)
          ReadTmp1 = lsr(ReadsPerThread[j], 2*e - ShiftValue);
          ReadTmp2 = 0;

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;

          CornerCase = 0;
          for(int Ci = 0; Ci < (2*e) - ShiftValue; Ci++)
          {
            set_bit(CornerCase, 31 - Ci);
          }
          DiagonalResult = DiagonalResult | CornerCase;
          localCounter = __clz(DiagonalResult);
        }
        else if ( (ShiftValue - (2*e) ) < 0 )
        {
          // offset underflows into the previous word: borrow its low bits
          ReadTmp1 = lsl(ReadsPerThread[j-1], 32 - (2*e - ShiftValue));
          ReadTmp2 = lsr(ReadsPerThread[j], 2*e - ShiftValue);

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;
          localCounter = __clz(DiagonalResult);
        }
        else
        {
          ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue - 2*e);
          ReadTmp2 = lsr(ReadsPerThread[j+1], 32 - (ShiftValue - 2*e)) ;

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;
          localCounter = __clz(DiagonalResult);
        }
        if (localCounter>localCounterMax)
          localCounterMax=localCounter;
      }

      /*
         sh = shift
         up = upper diagonal
         RC = ReadCompTmp
         FC = RefCompTmp
         D  = DiagonalResult
         DN = diagonal
         LC = localCounter
      */

      //////////////////// Lower diagonals /////////////////////
      // shift the read left by 2*e bits relative to the reference
      for(int e = 1; e <= F_ErrorThreshold; e++)
      {
        Diagonal += 1;
        CornerCase = 0;
        if (j<5)
        {
          if ((ShiftValue + 2*e) < 32)
          {
            ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue + 2*e);
            ReadTmp2 = lsr(ReadsPerThread[j+1], 32 - (ShiftValue + 2*e));

            ReadCompTmp = ReadTmp1 | ReadTmp2;
            RefCompTmp = RefTmp1 | RefTmp2;

            DiagonalResult = ReadCompTmp ^ RefCompTmp;
            localCounter = __clz(DiagonalResult);
          }
          else
          {
            // offset overflows into the next word pair
            ReadTmp1 = lsl(ReadsPerThread[j+1], (ShiftValue + 2*e) % 32);
            ReadTmp2 = lsr(ReadsPerThread[j+2], 32 - (ShiftValue + 2*e) % 32);

            ReadCompTmp = ReadTmp1 | ReadTmp2;
            RefCompTmp = RefTmp1 | RefTmp2;

            DiagonalResult = 0xffffffff;//ReadCompTmp ^ RefCompTmp;
            DiagonalResult = ReadCompTmp ^ RefCompTmp;
            localCounter = __clz(DiagonalResult);
          }
        }
        else
        {
          // near the tail (j >= 5): mask bits that fall past base 200
          ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue + 2*e);
          ReadTmp2 = lsr(ReadsPerThread[j+1], 32 - (ShiftValue + 2*e));

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;

          CornerCase = 0;
          if ((globalCounter+32)>200)
          {
            for(int Ci = globalCounter+32-200; Ci < globalCounter+32-200+2*e; Ci++)
            {
              set_bit(CornerCase, Ci);
            }
          }
          else if ((globalCounter+32)>=(200-(2*e))){
            for(int Ci = 0; Ci < (2*e); Ci++)
            {
              set_bit(CornerCase, Ci);
            }
          }
          DiagonalResult = DiagonalResult | CornerCase;
          localCounter = __clz(DiagonalResult);
        }
        if (localCounter>localCounterMax)
          localCounterMax=localCounter;
      }

      /*
         CC = CornerCase
         sh = shift
         up = upper diagonal
         RC = ReadCompTmp
         FC = RefCompTmp
         D  = DiagonalResult
         DN = diagonal
         LC = localCounter
      */

      // round the best run down to a whole number of bases (2 bits each)
      Max_leading_zeros = 0;
      if ( (j == 6) && ( ((localCounterMax/2)*2) >= 8) )
      {
        Max_leading_zeros = 8;
        break;
      }
      else if((localCounterMax/2*2) > Max_leading_zeros)
      {
        Max_leading_zeros = ((localCounterMax/2)*2);
      }

      // a run shorter than a full window counts as one error
      if (((Max_leading_zeros/2) < 16) && (j < 5))
      {
        AccumulatedErrs += 1;
      }
      else if ((j == 6) && ((Max_leading_zeros/2) < 4))
      {
        AccumulatedErrs += 1;
      }

      if(AccumulatedErrs > F_ErrorThreshold)
      {
        Ftest_Results[tid] = 0;   // too many errors: filter this read out
        break;
      }

      // advance past the matched run plus the mismatching base
      if(ShiftValue + Max_leading_zeros + 2 >= 32)
      {
        j += 1;
      }

      // ShiftValue_2Ref = (ShiftValue_2Ref + Max_leading_zeros + 2) %32;
      if (Max_leading_zeros == 32)
      {
        globalCounter += Max_leading_zeros;
      }
      else
      {
        ShiftValue = ((ShiftValue + Max_leading_zeros + 2) % 32);
        globalCounter += (Max_leading_zeros + 2);
      }
    }
  }
}
matrix_cpu.c
/* Copyright 2015 The math21 Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "../vector/files_c.h"
#include "matrix_cpu.h"

// C += k1 * A * B  (neither operand transposed).
// Row-major, leading dimensions lda/ldb/ldc; accumulation order matches the
// classic i-k-j GEMM loop so float results are bit-identical across variants.
void math21_matrix_multiply_k1AB_add_k2C_similar_nn_cpu(int nr_C, int nc_C, int n_common, float k1,
                                                        const float *A, int lda,
                                                        const float *B, int ldb,
                                                        float *C, int ldc) {
    int row;
#pragma omp parallel for
    for (row = 0; row < nr_C; ++row) {
        for (int inner = 0; inner < n_common; ++inner) {
            const float scaled_a = k1 * A[row * lda + inner];
            const float *b_row = B + inner * ldb;
            float *c_row = C + row * ldc;
            for (int col = 0; col < nc_C; ++col) {
                c_row[col] += scaled_a * b_row[col];
            }
        }
    }
}

// C += k1 * A * B^T  (B transposed): dot products of rows of A with rows of B.
void math21_matrix_multiply_k1AB_add_k2C_similar_nt_cpu(int nr_C, int nc_C, int n_common, float k1,
                                                        const float *A, int lda,
                                                        const float *B, int ldb,
                                                        float *C, int ldc) {
    int row;
#pragma omp parallel for
    for (row = 0; row < nr_C; ++row) {
        for (int col = 0; col < nc_C; ++col) {
            float acc = 0;
            for (int inner = 0; inner < n_common; ++inner) {
                acc += k1 * A[row * lda + inner] * B[col * ldb + inner];
            }
            C[row * ldc + col] += acc;
        }
    }
}

// C += k1 * A^T * B  (A transposed): A is read column-wise.
void math21_matrix_multiply_k1AB_add_k2C_similar_tn_cpu(int nr_C, int nc_C, int n_common, float k1,
                                                        const float *A, int lda,
                                                        const float *B, int ldb,
                                                        float *C, int ldc) {
    int row;
#pragma omp parallel for
    for (row = 0; row < nr_C; ++row) {
        for (int inner = 0; inner < n_common; ++inner) {
            const float scaled_a = k1 * A[inner * lda + row];
            const float *b_row = B + inner * ldb;
            float *c_row = C + row * ldc;
            for (int col = 0; col < nc_C; ++col) {
                c_row[col] += scaled_a * b_row[col];
            }
        }
    }
}

// C += k1 * A^T * B^T  (both transposed).
void math21_matrix_multiply_k1AB_add_k2C_similar_tt_cpu(int nr_C, int nc_C, int n_common, float k1,
                                                        const float *A, int lda,
                                                        const float *B, int ldb,
                                                        float *C, int ldc) {
    int row;
#pragma omp parallel for
    for (row = 0; row < nr_C; ++row) {
        for (int col = 0; col < nc_C; ++col) {
            float acc = 0;
            for (int inner = 0; inner < n_common; ++inner) {
                acc += k1 * A[inner * lda + row] * B[col * ldb + inner];
            }
            C[row * ldc + col] += acc;
        }
    }
}

// math21_matrix_multiply_k1AB_add_k2C_similar
// C = k1*(op(A)*op(B)) + k2*C, where op() is transpose when ta/tb is nonzero.
// C is first scaled by k2, then the matching kernel above accumulates into it.
void math21_matrix_multiply_k1AB_add_k2C_similar_cpu(int ta, int tb, int nr_C, int nc_C, int n_common, float k1,
                                                     const float *A, int lda,
                                                     const float *B, int ldb,
                                                     float k2,
                                                     float *C, int ldc) {
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",ta, tb, M, N, K, k1, lda, ldb, k2, ldc);
    int row;
    for (row = 0; row < nr_C; ++row) {
        for (int col = 0; col < nc_C; ++col) {
            C[row * ldc + col] *= k2;
        }
    }
    if (ta) {
        if (tb) {
            math21_matrix_multiply_k1AB_add_k2C_similar_tt_cpu(nr_C, nc_C, n_common, k1, A, lda, B, ldb, C, ldc);
        } else {
            math21_matrix_multiply_k1AB_add_k2C_similar_tn_cpu(nr_C, nc_C, n_common, k1, A, lda, B, ldb, C, ldc);
        }
    } else {
        if (tb) {
            math21_matrix_multiply_k1AB_add_k2C_similar_nt_cpu(nr_C, nc_C, n_common, k1, A, lda, B, ldb, C, ldc);
        } else {
            math21_matrix_multiply_k1AB_add_k2C_similar_nn_cpu(nr_C, nc_C, n_common, k1, A, lda, B, ldb, C, ldc);
        }
    }
}

// Allocate an nr x nc matrix and fill it with random values.
// Caller owns the returned buffer.
float* math21_matrix_create_random_cpu(int nr, int nc){
    float *data = (float *) math21_vector_create_with_default_value_cpu(nr*nc, 0);
    math21_vector_set_random_cpu(nr * nc, data);
    return data;
}
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define HOST_MAX_TEAMS 128 #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; double * pA = malloc(N*sizeof(double)); int fail = 0; INIT(); // // Test: if clause // ZERO(A); int num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512; // the number of teams started is implementation dependent int actual_teams = -1; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams if(0) map(tofrom:actual_teams) { if(omp_get_team_num() == 0) actual_teams = omp_get_num_teams(); A[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < actual_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: device clause // ZERO(A); num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams device(0) map(tofrom:actual_teams) { if(omp_get_team_num() == 0) actual_teams = omp_get_num_teams(); A[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < actual_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: map clause // ZERO(pA); num_teams = omp_is_initial_device() ? 
HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams map(pA[:N]) map(tofrom:actual_teams) { if(omp_get_team_num() == 0) actual_teams = omp_get_num_teams(); pA[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < actual_teams ; i++) if (pA[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, pA[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: num_teams and omp_get_team_num() // ZERO(A); num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) { A[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < num_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: thread_limit and omp_get_thread_num() // ZERO(A); fail = 0; int num_threads = omp_is_initial_device() ? HOST_MAX_TEAMS : 256; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(1) thread_limit(num_threads) #pragma omp parallel { int tid = omp_get_thread_num(); A[tid] += (double) tid; } } for (int i = 0 ; i < num_threads ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: if statement in teams region // ZERO(A); fail = 0; num_teams = omp_is_initial_device() ? 
HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) { if (omp_get_team_num() % 2 == 0) { int teid = omp_get_team_num(); A[teid] += (double) 1; } else { int teid = omp_get_team_num(); A[teid] += (double) 2; } } } for (int i = 0 ; i < num_teams ; i++) { if (i % 2 == 0) { if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } } else if (A[i] != 2*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) 2*TRIALS, A[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); /* // */ /* // Test: num_teams and thread_limit by simulating a distribute pragma */ /* // */ /* ZERO(A); */ /* fail = 0; */ /* for (int t = 0 ; t < TRIALS ; t++) { */ /* #pragma omp target teams num_teams(2) thread_limit(496) */ /* { */ /* if (omp_get_team_num() == 0) { */ /* #pragma omp parallel */ /* { */ /* A[omp_get_team_num()*496+omp_get_thread_num()] += omp_get_thread_num(); */ /* if(omp_get_thread_num() == 498) printf("teid = %d, tid = %d, accessing %d\n", omp_get_team_num(), omp_get_thread_num(), omp_get_team_num()*496+omp_get_thread_num()); */ /* } */ /* } else { */ /* #pragma omp parallel */ /* { */ /* if(omp_get_thread_num() == 0) */ /* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */ /* A[omp_get_team_num()*496+omp_get_thread_num()] -= omp_get_thread_num(); */ /* if(omp_get_thread_num() == 0) */ /* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */ /* } */ /* } */ /* } */ /* } */ /* for (int i = 0 ; i < 992 ; i++) { */ /* if (i < 496) { */ /* if (A[i] != i*TRIALS) { */ /* printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); */ /* fail = 1; */ /* } */ /* } else if(i >= 496) */ /* if (A[i] != -((i-496)*TRIALS)) { */ /* printf("Error at %d, h = %lf, d = %lf\n", i, (double) 
-((i-496)*TRIALS), A[i]); */ /* fail = 1; */ /* } */ /* } */ /* if(fail) printf("Failed\n"); */ /* else printf("Succeeded\n"); */ // // Test: private // ZERO(A); fail = 0; int a = 10; num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) private(a) { a = omp_get_team_num(); A[omp_get_team_num()] += a; } } for (int i = 0 ; i < num_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); fail = 0; a = 10; num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) firstprivate(a) { a += omp_get_team_num(); A[omp_get_team_num()] += a; } } for (int i = 0 ; i < num_teams ; i++) if (A[i] != 10+i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
restriction.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
static inline void restriction_pc_block(level_type *level_c, int id_c, level_type *level_f, int id_f, blockCopy_type *block, int restrictionType){
  // restrict 3D array from read_i,j,k of read[] to write_i,j,k in write[] using piecewise constant restriction (cell averaged)
  // Each coarse cell/face averages the 8 (cell) or 4 (face) fine values under it.

  int   dim_i       = block->dim.i; // calculate the dimensions of the resultant coarse block
  int   dim_j       = block->dim.j;
  int   dim_k       = block->dim.k;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  double * __restrict__  read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;
  // a non-negative box index means the pointer refers to a local box's vector
  // (offset past the ghost zone) rather than an MPI pack/unpack buffer
  if(block->read.box >=0){
     read_jStride = level_f->my_boxes[block->read.box ].jStride;
     read_kStride = level_f->my_boxes[block->read.box ].kStride;
     read = level_f->my_boxes[ block->read.box].vectors[id_f] + level_f->my_boxes[ block->read.box].ghosts*(1+ read_jStride+ read_kStride);
  }
  if(block->write.box>=0){
    write_jStride = level_c->my_boxes[block->write.box].jStride;
    write_kStride = level_c->my_boxes[block->write.box].kStride;
    write = level_c->my_boxes[block->write.box].vectors[id_c] + level_c->my_boxes[block->write.box].ghosts*(1+write_jStride+write_kStride);
  }

  int i,j,k;
  int ii,jj,kk;   // (ii,jj,kk) walks the fine grid at twice the coarse rate

  switch(restrictionType){
    case RESTRICT_CELL:   // average the 2x2x2 fine cells under each coarse cell
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = (i +write_i) + (j +write_j)*write_jStride + (k +write_k)*write_kStride;
           int  read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk                            ]+read[read_ijk+1                          ] +
                                read[read_ijk  +read_jStride             ]+read[read_ijk+1+read_jStride             ] +
                                read[read_ijk               +read_kStride]+read[read_ijk+1             +read_kStride] +
                                read[read_ijk  +read_jStride+read_kStride]+read[read_ijk+1+read_jStride+read_kStride] ) * 0.125;
         }}}break;
    case RESTRICT_FACE_I: // average the 2x2 fine i-faces under each coarse i-face
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = (i +write_i) + (j +write_j)*write_jStride + (k +write_k)*write_kStride;
           int  read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk                          ] +
                                read[read_ijk+read_jStride             ] +
                                read[read_ijk             +read_kStride] +
                                read[read_ijk+read_jStride+read_kStride] ) * 0.25;
         }}}break;
    case RESTRICT_FACE_J: // average the 2x2 fine j-faces under each coarse j-face
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = (i +write_i) + (j +write_j)*write_jStride + (k +write_k)*write_kStride;
           int  read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk               ] +
                                read[read_ijk+1             ] +
                                read[read_ijk  +read_kStride] +
                                read[read_ijk+1+read_kStride] ) * 0.25;
         }}}break;
    case RESTRICT_FACE_K: // average the 2x2 fine k-faces under each coarse k-face
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = (i +write_i) + (j +write_j)*write_jStride + (k +write_k)*write_kStride;
           int  read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
           write[write_ijk] = ( read[read_ijk             ] +
                                read[read_ijk+1           ] +
                                read[read_ijk  +read_jStride] +
                                read[read_ijk+1+read_jStride] ) * 0.25;
         }}}break;
  }
}

//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) restriction on vector id_f of the fine level and stores the result in vector id_c on the coarse level
// restrictionType specifies whether this is either cell-averaged restriction, or one of three face-averaged restrictions
// piecewise constant restriction requires neither a ghost zone exchange nor a boundary condition
// This is a rather bulk synchronous implementation which packs all MPI buffers before initiating any sends
// Similarly, it waits for all remote data before copying any into local boxes.
// It does however attempt to overlap local restriction with MPI
void restriction(level_type * level_c, int id_c, level_type *level_f, int id_f, int restrictionType){
  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;
  int buffer=0;
  int n;
  int my_tag = (level_f->tag<<4) | 0x5;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f sends and level_c recvs...
  int nMessages = level_c->restriction[restrictionType].num_recvs + level_f->restriction[restrictionType].num_sends;
  MPI_Request *recv_requests = level_f->restriction[restrictionType].requests;
  MPI_Request *send_requests = level_f->restriction[restrictionType].requests + level_c->restriction[restrictionType].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level_c->restriction[restrictionType].num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_c->restriction[restrictionType].num_recvs;n++){
      MPI_Irecv(level_c->restriction[restrictionType].recv_buffers[n],
                level_c->restriction[restrictionType].recv_sizes[n],
                MPI_DOUBLE,
                level_c->restriction[restrictionType].recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.restriction_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers...
  // blocks[0] = restrict-into-send-buffer work
  if(level_f->restriction[restrictionType].num_blocks[0]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[0])
    for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[0];buffer++){
      restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[0][buffer],restrictionType);
    }
    _timeEnd = getTime();
    level_f->timers.restriction_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level_f->restriction[restrictionType].num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_f->restriction[restrictionType].num_sends;n++){
      MPI_Isend(level_f->restriction[restrictionType].send_buffers[n],
                level_f->restriction[restrictionType].send_sizes[n],
                MPI_DOUBLE,
                level_f->restriction[restrictionType].send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.restriction_send += (_timeEnd-_timeStart);
  }
  #endif

  // perform local restriction[restrictionType]... try and hide within Isend latency...
  // blocks[1] = purely local fine-to-coarse work
  if(level_f->restriction[restrictionType].num_blocks[1]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[1])
    for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[1];buffer++){
      restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[1][buffer],restrictionType);
    }
    _timeEnd = getTime();
    level_f->timers.restriction_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level_f->restriction[restrictionType].requests,level_f->restriction[restrictionType].status);
    _timeEnd = getTime();
    level_f->timers.restriction_wait += (_timeEnd-_timeStart);
  }

  // unpack MPI receive buffers
  // blocks[2] = copy received coarse data into local coarse boxes
  if(level_c->restriction[restrictionType].num_blocks[2]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->restriction[restrictionType].num_blocks[2])
    for(buffer=0;buffer<level_c->restriction[restrictionType].num_blocks[2];buffer++){
      CopyBlock(level_c,id_c,&level_c->restriction[restrictionType].blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.restriction_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level_f->timers.restriction_total += (double)(getTime()-_timeCommunicationStart);
}
zero_omp.c
/*
 * File:    zero_omp.c
 * CVS:     $Id$
 * Author:  Philip Mucci
 *          mucci@cs.utk.edu
 * Mods:    Nils Smeds
 *          smeds@pdc.kth.se
 *          Anders Nilsson
 *          anni@pdc.kth.se
 */

/* This file performs the following test: start, stop and timer
   functionality for 2 slave OMP threads

   - It attempts to use the following two counters. It may use less
   depending on hardware counter resource limitations. These are counted
   in the default counting domain and default granularity, depending on
   the platform. Usually this is the user domain (PAPI_DOM_USER) and
   thread context (PAPI_GRN_THR).
     + PAPI_FP_INS
     + PAPI_TOT_CYC

   Each thread inside the Thread routine:
   - Get cyc.
   - Get us.
   - Start counters
   - Do flops
   - Stop and read counters
   - Get us.
   - Get cyc.

   Master serial thread:
   - Get us.
   - Get cyc.
   - Run parallel for loop
   - Get us.
   - Get cyc.
*/

#include "papi_test.h"

#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif

extern int TESTS_QUIET;         /* Declared in test_utils.c */
const PAPI_hw_info_t *hw_info = NULL;

/* Per-thread workload: count cycles + one FP/instruction event around
   do_flops(n), then print and release the event set.  The helpers
   (add_two_events, allocate_test_space, do_flops, ...) come from the PAPI
   test utilities. */
void
Thread( int n )
{
	int retval, num_tests = 1;
	int EventSet1 = PAPI_NULL;
	int PAPI_event, mask1;
	int num_events1;
	long long **values;
	long long elapsed_us, elapsed_cyc;
	char event_name[PAPI_MAX_STR_LEN];

	printf( "Thread 0x%x started\n", omp_get_thread_num(  ) );
	num_events1 = 2;
	/* add PAPI_TOT_CYC and one of the events in PAPI_FP_INS,
	   PAPI_FP_OPS or PAPI_TOT_INS, depending on the availability of
	   the event on the platform */
	EventSet1 = add_two_events( &num_events1, &PAPI_event, &mask1 );

	retval = PAPI_event_code_to_name( PAPI_event, event_name );
	if ( retval != PAPI_OK )
		test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval );

	values = allocate_test_space( num_tests, num_events1 );

	elapsed_us = PAPI_get_real_usec(  );

	elapsed_cyc = PAPI_get_real_cyc(  );

	retval = PAPI_start( EventSet1 );
	if ( retval != PAPI_OK )
		test_fail( __FILE__, __LINE__, "PAPI_start", retval );

	do_flops( n );

	retval = PAPI_stop( EventSet1, values[0] );
	if ( retval != PAPI_OK )
		test_fail( __FILE__, __LINE__, "PAPI_stop", retval );

	elapsed_us = PAPI_get_real_usec(  ) - elapsed_us;

	elapsed_cyc = PAPI_get_real_cyc(  ) - elapsed_cyc;

	remove_test_events( &EventSet1, mask1 );

	if ( !TESTS_QUIET ) {
		/* values[0][0] is PAPI_TOT_CYC, values[0][1] the added event */
		printf( "Thread 0x%x %-12s : \t%lld\n", omp_get_thread_num(  ),
				event_name, values[0][1] );
		printf( "Thread 0x%x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num(  ),
				values[0][0] );
		printf( "Thread 0x%x Real usec   : \t%lld\n", omp_get_thread_num(  ),
				elapsed_us );
		printf( "Thread 0x%x Real cycles : \t%lld\n", omp_get_thread_num(  ),
				elapsed_cyc );
	}

	/* It is illegal for the threads to exit in OpenMP */
	/* test_pass(__FILE__,0,0); */
	free_test_space( values, num_tests );

	PAPI_unregister_thread(  );
	printf( "Thread 0x%x finished\n", omp_get_thread_num(  ) );
}

/* Driver: initializes PAPI, registers omp_get_thread_num as the thread-id
   function, then runs Thread() in parallel, serially, and in parallel again
   to exercise thread register/unregister cycles. */
int
main( int argc, char **argv )
{
	int maxthr, retval;
	long long elapsed_us, elapsed_cyc;

	tests_quiet( argc, argv );	/* Set TESTS_QUIET variable */

	retval = PAPI_library_init( PAPI_VER_CURRENT );
	if ( retval != PAPI_VER_CURRENT )
		test_fail( __FILE__, __LINE__, "PAPI_library_init", retval );

	hw_info = PAPI_get_hardware_info(  );
	if ( hw_info == NULL )
		test_fail( __FILE__, __LINE__, "PAPI_get_hardware_info", 2 );

	elapsed_us = PAPI_get_real_usec(  );

	elapsed_cyc = PAPI_get_real_cyc(  );

	retval =
		PAPI_thread_init( ( unsigned long ( * )( void ) )
						  ( omp_get_thread_num ) );
	if ( retval != PAPI_OK ) {
		if ( retval == PAPI_ESBSTR )
			test_skip( __FILE__, __LINE__, "PAPI_thread_init", retval );
		else
			test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval );
	}

	/* parallel phase: every OpenMP thread runs the workload */
	#pragma omp parallel private(maxthr)
	{
		maxthr = omp_get_num_threads(  );
		Thread( 1000000 * ( omp_get_thread_num(  ) + 1 ) );
	}

	/* serial phase with a single thread */
	omp_set_num_threads( 1 );
	Thread( 1000000 * ( omp_get_thread_num(  ) + 1 ) );
	omp_set_num_threads( omp_get_max_threads(  ) );

	/* second parallel phase: threads must re-register with PAPI */
	#pragma omp parallel private(maxthr)
	{
		maxthr = omp_get_num_threads(  );
		Thread( 1000000 * ( omp_get_thread_num(  ) + 1 ) );
	}

	elapsed_cyc = PAPI_get_real_cyc(  ) - elapsed_cyc;

	elapsed_us = PAPI_get_real_usec(  ) - elapsed_us;

	if ( !TESTS_QUIET ) {
		printf( "Master real usec   : \t%lld\n", elapsed_us );
		printf( "Master real cycles : \t%lld\n", elapsed_cyc );
	}

	test_pass( __FILE__, NULL, 0 );
	exit( 0 );
}
GB_assign_zombie5.c
//------------------------------------------------------------------------------
// GB_assign_zombie5: delete entries in C for C_replace_phase
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// For GrB_Matrix_assign, C(I,J)<M,repl>=..., if C_replace is true, and mask M
// is present, then any entry C(i,j) outside IxJ must be deleted, if
// M(i,j)=0.

// Entries are "deleted" by flipping their row index (GB_FLIP), turning them
// into zombies; a later wait/assemble phase physically removes them.

// See also GB_assign_zombie3 and GB_assign_zombie4.

#include "GB_assign.h"
#include "GB_ek_slice.h"

void GB_assign_zombie5
(
    GrB_Matrix Z,                   // the matrix C, or a copy
    const GrB_Matrix M,             // mask; M(i,j)=0 entries outside IxJ die
    const bool Mask_comp,           // if true, use the complement of M
    const GrB_Index *I,             // row index list (interpretation: Ikind)
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,             // column index list (interpretation: Jkind)
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get Z
    //--------------------------------------------------------------------------

    const int64_t *restrict Zh = Z->h ;     // hyperlist (NULL if standard)
    const int64_t *restrict Zp = Z->p ;
    // const int64_t Znvec = Z->nvec ;
    int64_t *restrict Zi = Z->i ;
    int64_t nzombies = Z->nzombies ;        // local copy; written back at exit
    const int64_t znz = GB_NNZ (Z) ;

    //--------------------------------------------------------------------------
    // get M
    //--------------------------------------------------------------------------

    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mi = M->i ;
    const GB_void *restrict Mx = M->x ;
    const size_t msize = M->type->size ;
    // cast_M converts one M entry of any built-in type to bool
    const GB_cast_function cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
    const int64_t Mnvec = M->nvec ;
    const bool M_is_hyper = M->is_hyper ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (znz, chunk, nthreads_max) ;
    // oversubscribe tasks (64 per thread) so the dynamic schedule can balance
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;

    //--------------------------------------------------------------------------
    // slice the entries for each task
    //--------------------------------------------------------------------------

    // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 and
    // vectors kfirst_slice [tid] to klast_slice [tid].  The first and last
    // vectors may be shared with prior slices and subsequent slices.

    int64_t pstart_slice [ntasks+1] ;
    int64_t kfirst_slice [ntasks] ;
    int64_t klast_slice  [ntasks] ;
    GB_ek_slice (pstart_slice, kfirst_slice, klast_slice, Z, ntasks) ;

    //--------------------------------------------------------------------------
    // each task creates its own zombies
    //--------------------------------------------------------------------------

    // nzombies is a reduction: each task counts its own, summed at the join
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // get the task description
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        //----------------------------------------------------------------------
        // scan vectors kfirst to klast for entries to delete
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get Z(:,j) and determine if j is outside the list J
            //------------------------------------------------------------------

            // for a hypersparse Z, vector k holds column Zh [k]; else column k
            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            // j_outside is true if column j is outside the Z(I,J) submatrix
            bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ;
            int64_t pZ_start, pZ_end ;
            // clip [Zp[k], Zp[k+1]) to this task's entry range
            GB_get_pA_and_pC (&pZ_start, &pZ_end, NULL,
                tid, k, kfirst, klast, pstart_slice, NULL, NULL, Zp) ;

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            int64_t pM_start, pM_end ;
            int64_t pleft = 0 ;
            int64_t pright = Mnvec - 1 ;
            GB_lookup (M_is_hyper, Mh, Mp, &pleft, pright, j,
                &pM_start, &pM_end) ;

            //------------------------------------------------------------------
            // iterate over all entries in Z(:,j)
            //------------------------------------------------------------------

            for (int64_t pZ = pZ_start ; pZ < pZ_end ; pZ++)
            {

                //--------------------------------------------------------------
                // consider Z(i,j)
                //--------------------------------------------------------------

                // Z(i,j) is outside the Z(I,J) submatrix if either i is
                // not in the list I, or j is not in J, or both.
                int64_t i = Zi [pZ] ;
                if (!GB_IS_ZOMBIE (i) &&
                    (j_outside || !GB_ij_is_in_list (I, nI, i, Ikind, Icolon)))
                {

                    //----------------------------------------------------------
                    // Z(i,j) is a live entry not in the Z(I,J) submatrix
                    //----------------------------------------------------------

                    // Check the mask M to see if it should be deleted.
                    int64_t pM = pM_start ;
                    int64_t pright = pM_end - 1 ;   // shadows outer pright
                    bool found ;
                    GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                    bool mij = false ;
                    if (found)
                    {
                        // found it
                        cast_M (&mij, Mx +(pM*msize), 0) ;
                    }
                    if (Mask_comp)
                    {
                        // negate the mask if Mask_comp is true
                        mij = !mij ;
                    }
                    if (!mij)
                    {
                        // delete Z(i,j) by marking it as a zombie
                        nzombies++ ;
                        Zi [pZ] = GB_FLIP (i) ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    Z->nzombies = nzombies ;
}
GB_unop__identity_uint16_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint16_fc64
// op(A') function:  GB_unop_tran__identity_uint16_fc64

// C type:   uint16_t
// A type:   GxB_FC64_t
// cast:     uint16_t cij = GB_cast_to_uint16_t (creal (aij))
// unaryop:  cij = aij

// The "identity" op with mismatched types acts as a typecast: the real part
// of each double-complex entry of A is cast to uint16_t.

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: no-op on the already-cast value)
#define GB_OP(z, x) \
    z = x ;

// casting: keep the real part, drop the imaginary part
#define GB_CAST(z, aij) \
    uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = GB_cast_to_uint16_t (creal (aij)) ; \
    Cx [pC] = z ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint16_fc64
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel elementwise loop; static schedule is uniform work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint16_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included with the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
slice.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_SLICE_H_ #define MACE_KERNELS_SLICE_H_ #include <memory> #include <functional> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/core/types.h" #include "mace/public/mace.h" #ifdef MACE_ENABLE_OPENCL #include "mace/core/runtime/opencl/cl2_header.h" #endif // MACE_ENABLE_OPENCL namespace mace { namespace kernels { struct SliceFunctorBase { explicit SliceFunctorBase(const int32_t axis) : axis_(axis) {} int32_t axis_; }; template<DeviceType D, typename T> struct SliceFunctor : SliceFunctorBase { explicit SliceFunctor(const int32_t axis) : SliceFunctorBase(axis) {} MaceStatus operator()(const Tensor *input, const std::vector<Tensor *> &output_list, StatsFuture *future) { MACE_UNUSED(future); const index_t input_channels = input->dim(axis_); const size_t outputs_count = output_list.size(); const index_t output_channels = input_channels / outputs_count; std::vector<T *> output_ptrs(output_list.size(), nullptr); std::vector<index_t> output_shape(input->shape()); output_shape[axis_] = output_channels; const index_t outer_size = std::accumulate(output_shape.begin(), output_shape.begin() + axis_, 1, std::multiplies<index_t>()); const index_t inner_size = std::accumulate(output_shape.begin() + axis_ + 1, output_shape.end(), 1, std::multiplies<index_t>()); for (size_t i= 0; i < outputs_count; ++i) { 
MACE_RETURN_IF_ERROR(output_list[i]->Resize(output_shape)); output_ptrs[i] = output_list[i]->mutable_data<T>(); } const T *input_ptr = input->data<T>(); #pragma omp parallel for for (int outer_idx = 0; outer_idx < outer_size; ++outer_idx) { int input_idx = outer_idx * input_channels * inner_size; int output_idx = outer_idx * output_channels * inner_size; for (size_t i = 0; i < outputs_count; ++i) { if (DataTypeCanUseMemcpy(DataTypeToEnum<T>::v())) { memcpy(output_ptrs[i]+output_idx, input_ptr+input_idx, output_channels * inner_size * sizeof(T)); } else { for (index_t k = 0; k < output_channels * inner_size; ++k) { *(output_ptrs[i] + output_idx + k) = *(input_ptr + input_idx + k); } } input_idx += output_channels * inner_size; } } return MACE_SUCCESS; } }; #ifdef MACE_ENABLE_OPENCL template<typename T> struct SliceFunctor<DeviceType::GPU, T> : SliceFunctorBase { explicit SliceFunctor(const int32_t axis) : SliceFunctorBase(axis) {} MaceStatus operator()(const Tensor *input, const std::vector<Tensor *> &output_list, StatsFuture *future); cl::Kernel kernel_; uint32_t kwg_size_; std::unique_ptr<BufferBase> kernel_error_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // namespace mace #endif // MACE_KERNELS_SLICE_H_
test.c
#define LOL 1337 #include <stdlib.h> int main() { #pragma omp parallel for (;;) if (void* lol = malloc(LOL, LOL)) memset(lol, '!', LOL * LOL); }
convolution_sgemm_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// GEMM over an im2col'd fp16 input, packn-element register packing (RVV).
// Data is first permuted into tiles of 8/4/2/1 columns so the inner GEMM
// loop reads tmpptr sequentially, then each output channel accumulates
// nn = inch * maxk * packn fused multiply-accumulates per output column.
static void im2col_sgemm_packn_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = number of fp16 lanes per vector register (vlenb bytes / 2)
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    // Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // output spatial elements (outw*outh)
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const __fp16* bias = _bias;         // may be empty (null) when no bias

    // permute: regroup columns into tiles of 8, then 4, then 2, then 1,
    // so the GEMM below can stream 8/4/2/1 values per kernel element
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
    else if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 2u * packn, packn, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size >> 3;   // number of full 8-column tiles

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            __fp16* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

                for (int k = 0; k < maxk; k++)
                {
#if C906
#ifdef RVV_SPEC_0_7
                    // RVV spec 0.7 (C906): 8 vector loads + one 8-way
                    // segment store = transpose of an 8 x packn tile
                    asm volatile(
                        "mv t3, %[LEN] \n\t"
                        "mv t1, %[SRC] \n\t"
                        "mv t2, %[TMP] \n\t"
                        "slli t3, t3, 1 \n\t"   // t3 = packn * sizeof(__fp16)
                        "vle.v v0, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v1, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v2, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v3, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v4, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v5, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v6, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v7, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vsseg8e.v v0, (t2) \n\t"
                        :
                        : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "t1", "t2", "t3");
                    img0 += size * packn;
                    tmpptr += packn * 8;
#else
                    // scalar fallback: interleave 8 columns lane by lane
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = img0[l];
                        tmpptr[1] = img0[l + packn];
                        tmpptr[2] = img0[l + packn * 2];
                        tmpptr[3] = img0[l + packn * 3];
                        tmpptr[4] = img0[l + packn * 4];
                        tmpptr[5] = img0[l + packn * 5];
                        tmpptr[6] = img0[l + packn * 6];
                        tmpptr[7] = img0[l + packn * 7];
                        tmpptr += 8;
                    }
                    img0 += size * packn;
#endif
#else
                    // RVV intrinsics: same 8-way interleave via segment store
                    vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(img0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(img0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(img0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(img0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);

                    img0 += size * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
        }

        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;   // 4-column tiles

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

                for (int k = 0; k < maxk; k++)
                {
#if C906
#ifdef RVV_SPEC_0_7
                    asm volatile(
                        "mv t3, %[LEN] \n\t"
                        "mv t1, %[SRC] \n\t"
                        "mv t2, %[TMP] \n\t"
                        "slli t3, t3, 1 \n\t"
                        "vle.v v0, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v1, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v2, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v3, (t1) \n\t"
                        "vsseg4e.v v0, (t2) \n\t"
                        :
                        : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "t1", "t2", "t3");
                    img0 += size * packn;
                    tmpptr += packn * 4;
#else
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = img0[l];
                        tmpptr[1] = img0[l + packn];
                        tmpptr[2] = img0[l + packn * 2];
                        tmpptr[3] = img0[l + packn * 3];
                        tmpptr += 4;
                    }
                    img0 += size * packn;
#endif
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);

                    img0 += size * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;   // 2-column tiles

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

                for (int k = 0; k < maxk; k++)
                {
#if C906
#ifdef RVV_SPEC_0_7
                    // NOTE(review): unlike the 8- and 4-wide paths above,
                    // this asm block contains no "vsseg2e.v v0, (t2)" store,
                    // so the loaded data is never written to tmpptr (t2 is
                    // set but unused) -- verify against upstream ncnn.
                    asm volatile(
                        "mv t3, %[LEN] \n\t"
                        "mv t1, %[SRC] \n\t"
                        "mv t2, %[TMP] \n\t"
                        "slli t3, t3, 1 \n\t"
                        "vle.v v0, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        "vle.v v1, (t1) \n\t"
                        "add t1, t1, t3 \n\t"
                        :
                        : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr)
                        : "cc", "memory", "v0", "v1", "t1", "t2", "t3");
                    img0 += size * packn;
                    tmpptr += packn * 2;
#else
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = img0[l];
                        tmpptr[1] = img0[l + packn];
                        tmpptr += 2;
                    }
                    img0 += size * packn;
#endif
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);

                    img0 += size * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
        }

        remain_size_start += nn_size << 1;

        // leftover single columns
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

                for (int k = 0; k < maxk; k++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(img0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);

                    img0 += size * packn;
                    tmpptr += packn;
                }
            }
        }
    }

    // GEMM: one output channel per task; each tile width (8/4/2/1) keeps
    // that many accumulator registers live across the nn-length dot product
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        int i = 0;
        for (; i + 7 < size; i += 8)
        {
            const __fp16* tmpptr = tmp.channel(i / 8);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk * packn; // inch always > 0

            vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

            if (bias)
            {
                // seed all 8 accumulators with this channel's bias vector
                _sum0 = vle16_v_f16m1(bias + p * packn, vl);
                _sum1 = vle16_v_f16m1(bias + p * packn, vl);
                _sum2 = vle16_v_f16m1(bias + p * packn, vl);
                _sum3 = vle16_v_f16m1(bias + p * packn, vl);
                _sum4 = vle16_v_f16m1(bias + p * packn, vl);
                _sum5 = vle16_v_f16m1(bias + p * packn, vl);
                _sum6 = vle16_v_f16m1(bias + p * packn, vl);
                _sum7 = vle16_v_f16m1(bias + p * packn, vl);
            }

            for (int j = 0; j < nn; j++)
            {
                // 8 scalar activations x one packn-wide weight vector
                __fp16 val0 = *tmpptr++;
                __fp16 val1 = *tmpptr++;
                __fp16 val2 = *tmpptr++;
                __fp16 val3 = *tmpptr++;
                __fp16 val4 = *tmpptr++;
                __fp16 val5 = *tmpptr++;
                __fp16 val6 = *tmpptr++;
                __fp16 val7 = *tmpptr++;
                vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
                _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                kptr0 += packn;
            }

            vse16_v_f16m1(outptr0, _sum0, vl);
            vse16_v_f16m1(outptr0 + packn, _sum1, vl);
            vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
            vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
            vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
            vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
            vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
            vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);

            outptr0 += packn * 8;
        }
        for (; i + 3 < size; i += 4)
        {
            const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk * packn; // inch always > 0

            vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

            if (bias)
            {
                _sum0 = vle16_v_f16m1(bias + p * packn, vl);
                _sum1 = vle16_v_f16m1(bias + p * packn, vl);
                _sum2 = vle16_v_f16m1(bias + p * packn, vl);
                _sum3 = vle16_v_f16m1(bias + p * packn, vl);
            }

            for (int j = 0; j < nn; j++)
            {
                __fp16 val0 = *tmpptr++;
                __fp16 val1 = *tmpptr++;
                __fp16 val2 = *tmpptr++;
                __fp16 val3 = *tmpptr++;
                vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
                _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                kptr0 += packn;
            }

            vse16_v_f16m1(outptr0, _sum0, vl);
            vse16_v_f16m1(outptr0 + packn, _sum1, vl);
            vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
            vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);

            outptr0 += packn * 4;
        }
        for (; i + 1 < size; i += 2)
        {
            const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk * packn; // inch always > 0

            vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
            vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

            if (bias)
            {
                _sum0 = vle16_v_f16m1(bias + p * packn, vl);
                _sum1 = vle16_v_f16m1(bias + p * packn, vl);
            }

            for (int j = 0; j < nn; j++)
            {
                __fp16 val0 = *tmpptr++;
                __fp16 val1 = *tmpptr++;
                vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
                _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                kptr0 += packn;
            }

            vse16_v_f16m1(outptr0, _sum0, vl);
            vse16_v_f16m1(outptr0 + packn, _sum1, vl);

            outptr0 += packn * 2;
        }
        for (; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk * packn; // inch always > 0

            vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

            if (bias)
            {
                _sum = vle16_v_f16m1(bias + p * packn, vl);
            }

            for (int j = 0; j < nn; j++)
            {
                __fp16 val = *tmpptr++;
                vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
                _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                kptr0 += packn;
            }

            vse16_v_f16m1(outptr0, _sum, vl);

            outptr0 += packn;
        }
    }
}

// Driver: expand the padded input into im2col layout (one row run per
// kernel tap, stride-aware), then run the packed GEMM above.
static void convolution_im2col_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator);
    {
        // gap skips from the end of one output row to the start of the next
        const int gap = (w * stride_h - outw * stride_w) * packn;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * packn;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            // copy one packn-wide pixel per output position
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vse16_v_f16m1(ptr, _val, vl);

                            sptr += stride_w * packn;
                            ptr += packn;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_packn_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
omp_master_3.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_master_3() { int nthreads; int executing_thread; int tid_result = 0; /* counts up the number of wrong thread no. for the master thread. (Must be 0) */ nthreads = 0; executing_thread = -1; #pragma omp parallel { #pragma omp master { int tid = omp_get_thread_num(); if (tid != 0) { #pragma omp critical { tid_result++; } } #pragma omp critical { nthreads++; } executing_thread = omp_get_thread_num (); } /* end of master*/ } /* end of parallel*/ return ((nthreads == 1) && (executing_thread == 0) && (tid_result == 0)); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_master_3()) { num_failed++; } } return num_failed; }
LRBreakup.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign // Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory // Ye Luo, yeluo@anl.gov, Argonne National Laboratory // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_LRBREAKUP_H #define QMCPLUSPLUS_LRBREAKUP_H #include "Configuration.h" #include "Particle/ParticleSet.h" #include "LongRange/KContainer.h" #include "CPU/BLAS.hpp" #include <cassert> namespace qmcplusplus { template<class BreakupBasis> struct LRBreakup { DECLARE_COULOMB_TYPES //Typedef for the lattice-type. We don't need the full particle-set. typedef ParticleSet::ParticleLayout_t ParticleLayout_t; //We use an internal k-list with degeneracies to do the breakup. //We do this because the number of vectors is much larger than we'd //use elsewhere. void AddKToList(mRealType k, mRealType degeneracy = 1.0); ///The basis to be used for breakup. BreakupBasis& Basis; /// For each k, KList[k][0] = |k| and KList[k][1] = degeneracy std::vector<TinyVector<mRealType, 2>> KList; /** setup KList * @param kc k-space cutoff for long-range sums * @param kcont k at which approximate (spherical shell) degeneracies are used. 
* @param kmax largest k used for performing the breakup * @return the maximum kshell for the given kc */ int SetupKVecs(mRealType kc, mRealType kcont, mRealType kmax); //Fk is FT of F_full(r) up to kmax //adjust is used for constraining values in the breakup /* REPLACED SO WE CAN USE TYPES OTHER THAN STL VECTOR. mRealType DoBreakup(const std::vector<mRealType> &Fk, std::vector<mRealType> &t, const std::vector<bool> &adjust); mRealType DoBreakup(const std::vector<mRealType> &Fk, std::vector<mRealType> &t); */ mRealType DoBreakup(mRealType* Fk, mRealType* t, mRealType* adjust); mRealType DoGradBreakup(mRealType* Fk, mRealType* t, mRealType* adjust); mRealType DoStrainBreakup(mRealType* Fk, mRealType* dFk, mRealType* t, mRealType* adjust); void DoAllBreakup(mRealType* chisqr, mRealType* Fk, mRealType* dFk, mRealType* t, mRealType* gt, mRealType* dt, mRealType* adjust); mRealType DoBreakup(mRealType* Fk, mRealType* t) { const mRealType tolerance = std::numeric_limits<mRealType>::epsilon(); //t must be allocated up to Basis.NumBasisElem(); //Fk must be allocated and filled up to KList.size(); // assert(t.size()==Basis.NumBasisElem()); Matrix<mRealType> A; std::vector<mRealType> b; Matrix<mRealType> cnk; int numElem = Basis.NumBasisElem(); //t.size(); A.resize(numElem, numElem); b.resize(numElem, 0.0); cnk.resize(numElem, KList.size()); // Fill in cnk. 
// app_log() << "Check OMP size : numElem, KList.size : " << numElem << " , " << KList.size() << std::endl; #pragma omp parallel for shared(cnk) for (int n = 0; n < numElem; n++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k = KList[ki][0]; cnk(n, ki) = Basis.c(n, k); } } // Now, fill in A and b A = 0.0; for (int l = 0; l < numElem; l++) { for (int ki = 0; ki < KList.size(); ki++) { b[l] += KList[ki][1] * Fk[ki] * cnk(l, ki); for (int n = 0; n < numElem; n++) A(l, n) += KList[ki][1] * cnk(l, ki) * cnk(n, ki); } } ////////////////////////// //Do the SVD: // Matrix<mRealType> U(numElem, numElem), V(numElem, numElem); // std::vector<mRealType> S(numElem), Sinv(numElem); ////////////////////////// // SVdecomp(A, U, S, V); ////////////////////////// int M = A.rows(); int N = A.cols(); Matrix<mRealType> Atrans(N, M); Matrix<mRealType> U, V; U.resize(std::min(M, N), M); V.resize(N, std::min(M, N)); std::vector<mRealType> S, Sinv; S.resize(std::min(N, M)); //Do the transpose for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) Atrans(j, i) = A(i, j); } char JOBU = 'S'; char JOBVT = 'S'; int LDA = M; int LDU = M; int LDVT = std::min(M, N); int LWORK = 10 * std::max(3 * std::min(N, M) + std::max(M, N), 5 * std::min(M, N)); std::vector<mRealType> WORK(LWORK); int INFO; LAPACK::gesvd(&JOBU, &JOBVT, &M, &N, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); int ur = U.rows(); int uc = U.cols(); Matrix<mRealType> Utrans(uc, ur); for (int i = 0; i < ur; i++) { for (int j = 0; j < uc; j++) Utrans(j, i) = U(i, j); } U.resize(uc, ur); U = Utrans; /////////////////////////////////// // Zero out near-singular values mRealType Smax = S[0]; for (int i = 1; i < S.size(); i++) Smax = std::max(S[i], Smax); Sinv.resize(S.size()); for (int i = 0; i < S.size(); i++) Sinv[i] = (S[i] < (tolerance * Smax)) ? 
0.0 : (1.0 / S[i]); int numSingular = 0; for (int i = 0; i < Sinv.size(); i++) if (Sinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in breakup.\n"; for (int i = 0; i < numElem; i++) t[i] = 0.0; // Compute t_n, removing singular values for (int i = 0; i < numElem; i++) { mRealType coef = 0.0; for (int j = 0; j < numElem; j++) coef += U(j, i) * b[j]; coef *= Sinv[i]; for (int k = 0; k < numElem; k++) t[k] += coef * V(k, i); } // Calculate chi-squared mRealType Yk, chi2; chi2 = 0.0; for (int ki = 0; ki < KList.size(); ki++) { Yk = Fk[ki]; for (int n = 0; n < numElem; n++) { Yk -= cnk(n, ki) * t[n]; } chi2 += KList[ki][1] * Yk * Yk; } return (chi2); } //The constructor. Call the constructor of basis... //set up the basis parameters too. LRBreakup(BreakupBasis& bref) : Basis(bref) { /*Do Nothing*/ } mRealType DoGradBreakup(mRealType* Fk, mRealType* t) { const mRealType tolerance = std::numeric_limits<mRealType>::epsilon(); //t must be allocated up to Basis.NumBasisElem(); //Fk must be allocated and filled up to KList.size(); // assert(t.size()==Basis.NumBasisElem()); Matrix<mRealType> A; std::vector<mRealType> b; Matrix<mRealType> cnk; int numElem = Basis.NumBasisElem(); //t.size(); A.resize(numElem, numElem); b.resize(numElem, 0.0); cnk.resize(numElem, KList.size()); // Fill in cnk. 
for (int n = 0; n < numElem; n++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k = KList[ki][0]; cnk(n, ki) = Basis.c(n, k); } } // Now, fill in A and b A = 0.0; for (int l = 0; l < numElem; l++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k2 = KList[ki][0] * KList[ki][0]; b[l] += k2 * KList[ki][1] * Fk[ki] * cnk(l, ki); for (int n = 0; n < numElem; n++) A(l, n) += k2 * KList[ki][1] * cnk(l, ki) * cnk(n, ki); } } ////////////////////////// //Do the SVD: // Matrix<mRealType> U(numElem, numElem), V(numElem, numElem); // std::vector<mRealType> S(numElem), Sinv(numElem); ////////////////////////// // SVdecomp(A, U, S, V); ////////////////////////// int M = A.rows(); int N = A.cols(); Matrix<mRealType> Atrans(N, M); Matrix<mRealType> U, V; U.resize(std::min(M, N), M); V.resize(N, std::min(M, N)); std::vector<mRealType> S, Sinv; S.resize(std::min(N, M)); //Do the transpose for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) Atrans(j, i) = A(i, j); } char JOBU = 'S'; char JOBVT = 'S'; int LDA = M; int LDU = M; int LDVT = std::min(M, N); int LWORK = 10 * std::max(3 * std::min(N, M) + std::max(M, N), 5 * std::min(M, N)); std::vector<mRealType> WORK(LWORK); int INFO; LAPACK::gesvd(&JOBU, &JOBVT, &M, &N, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); int ur = U.rows(); int uc = U.cols(); Matrix<mRealType> Utrans(uc, ur); for (int i = 0; i < ur; i++) { for (int j = 0; j < uc; j++) Utrans(j, i) = U(i, j); } U.resize(uc, ur); U = Utrans; /////////////////////////////////// // Zero out near-singular values mRealType Smax = S[0]; for (int i = 1; i < S.size(); i++) Smax = std::max(S[i], Smax); Sinv.resize(S.size()); for (int i = 0; i < S.size(); i++) Sinv[i] = (S[i] < (tolerance * Smax)) ? 
0.0 : (1.0 / S[i]);
// Count (and report) the singular directions that were dropped.
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
  if (Sinv[i] == 0.0)
    numSingular++;
if (numSingular > 0)
  std::cout << "There were " << numSingular << " singular values in breakup.\n";
for (int i = 0; i < numElem; i++)
  t[i] = 0.0;
// Compute t_n, removing singular values
for (int i = 0; i < numElem; i++)
{
  mRealType coef = 0.0;
  for (int j = 0; j < numElem; j++)
    coef += U(j, i) * b[j];
  coef *= Sinv[i];
  for (int k = 0; k < numElem; k++)
    t[k] += coef * V(k, i);
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
  mRealType k2 = KList[ki][0] * KList[ki][0];
  Yk = Fk[ki];
  for (int n = 0; n < numElem; n++)
  {
    Yk -= cnk(n, ki) * t[n];
  }
  chi2 += k2 * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
}; // NOTE(review): closes the enclosing class declared above this chunk -- confirm

/** Record a k-shell magnitude in KList, merging duplicates.
 *
 *  If a magnitude equal to k (within 1.0e-12) is already present, its
 *  degeneracy (KList[ki][1]) is accumulated; otherwise a new
 *  (k, degeneracy) pair is appended.  Linear search over KList.
 */
template<class BreakupBasis>
void LRBreakup<BreakupBasis>::AddKToList(mRealType k, mRealType degeneracy /* =1.0 */)
{
  //Search for this k already in list
  int ki = 0;
  while ((ki < KList.size()) && (std::abs(k - KList[ki][0]) > 1.0e-12))
    ki++;
  if (ki == KList.size())
  {
    TinyVector<mRealType, 2> temp(k, degeneracy);
    KList.push_back(temp);
  }
  else
    KList[ki][1] += degeneracy;
}

/** Build the list of |k| magnitudes (with degeneracies) used by the breakup fit.
 *
 *  Below kcont, exact k-shells from the lattice (via KContainer) are added
 *  with their true degeneracies.  Between kcont and kmax, shells are added
 *  with an approximate degeneracy computed from the k-space volume of a
 *  thin spherical (3D) or circular (2D) shell divided by the volume per
 *  k-point of the cell.
 *
 *  @param kc    cutoff below which k-points are NOT added (handled in real space)
 *  @param kcont boundary between exact shells and continuum approximation
 *  @param kmax  largest |k| included in the fit
 *  @return index of the first exact shell beyond kc (maxkshell)
 */
template<class BreakupBasis>
int LRBreakup<BreakupBasis>::SetupKVecs(mRealType kc, mRealType kcont, mRealType kmax)
{
  //Add low |k| ( < kcont) k-points with exact degeneracy
  KContainer kexact;
  kexact.UpdateKLists(Basis.get_Lattice(), kcont);
  bool findK = true;
  mRealType kc2 = kc * kc;
  //use at least one shell
  size_t ks = 0;
  kc2 = std::max(kc2, static_cast<mRealType>(kexact.ksq[kexact.kshell[ks]]));
  // NOTE(review): if kc2 exceeds the k^2 of every generated shell this scan
  // walks past the end of kexact.kshell -- TODO confirm callers guarantee kc < kcont.
  while (findK)
  {
    if (kexact.ksq[kexact.kshell[ks]] > kc2)
      findK = false;
    else
      ks++;
  }
  size_t maxkshell = ks;
  // Number of exact k-vectors that actually enter the fit (those beyond kc).
  size_t numk = kexact.numk - kexact.kshell[ks];
  for (; ks < kexact.kshell.size() - 1; ks++)
    AddKToList(std::sqrt(kexact.ksq[kexact.kshell[ks]]), kexact.kshell[ks + 1] - kexact.kshell[ks]);
  ////Add these vectors to the internal list
  //int numk=0;
  //mRealType modk2;
  //for(int ki=0; ki<kexact.numk; ki++) {
  //  modk2 = dot(kexact.kpts_cart[ki],kexact.kpts_cart[ki]);
  //  if(modk2 > (kc*kc)) { //Breakup needs kc < k < kcont.
  //    AddKToList(std::sqrt(modk2));
  //    numk++;
  //  }
  //}
  //Add high |k| ( >kcont, <kmax) k-points with approximate degeneracy
  //Volume of 1 K-point is (2pi)^3/(a1.a2^a3)
#if OHMMS_DIM == 3
  mRealType kelemvol = 8 * M_PI * M_PI * M_PI / Basis.get_CellVolume();
  //Generate 4000 shells:
  const int N = 4000;
  mRealType deltak = (kmax - kcont) / N;
  for (int i = 0; i < N; i++)
  {
    mRealType k1 = kcont + deltak * i;
    mRealType k2 = k1 + deltak;
    mRealType kmid = 0.5 * (k1 + k2);
    // Degeneracy ~ (shell volume) / (volume per k-point).
    mRealType shellvol = 4.0 * M_PI * (k2 * k2 * k2 - k1 * k1 * k1) / 3.0;
    mRealType degeneracy = shellvol / kelemvol;
    AddKToList(kmid, degeneracy);
    numk += static_cast<int>(degeneracy);
  }
#elif OHMMS_DIM == 2
  mRealType kelemvol = 4 * M_PI * M_PI / Basis.get_CellVolume();
  //Generate 8000 shells:
  const int N = 8000;
  mRealType deltak = (kmax - kcont) / N;
  for (int i = 0; i < N; i++)
  {
    mRealType k1 = kcont + deltak * i;
    mRealType k2 = k1 + deltak;
    mRealType kmid = 0.5 * (k1 + k2);
    mRealType shellvol = M_PI * (k2 * k2 - k1 * k1);
    mRealType degeneracy = shellvol / kelemvol;
    AddKToList(kmid, degeneracy);
    numk += static_cast<int>(degeneracy);
  }
#endif
  app_log() << " NUMBER OF OPT_BREAK KVECS = " << numk << std::endl;
  return maxkshell;
  //numk now contains the total number of vectors.
  //this->klist.size() contains the number of unique vectors.
} //Do the constrained breakup template<class BreakupBasis> typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoBreakup(mRealType* Fk, mRealType* t, mRealType* adjust) { const mRealType tolerance = std::numeric_limits<mRealType>::epsilon(); //t and adjust must be allocated up to Basis.NumBasisElem(); //Fk must be allocated and filled up to KList.size(); // assert(t.size()==adjust.size()); // assert(t.size()==Basis.NumBasisElem()); Matrix<mRealType> A; std::vector<mRealType> b; Matrix<mRealType> cnk; int N = Basis.NumBasisElem(); //t.size(); A.resize(N, N); b.resize(N, 0.0); cnk.resize(N, KList.size()); //Fill in cnk. for (int n = 0; n < N; n++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k = KList[ki][0]; cnk(n, ki) = Basis.c(n, k); } } //Fill in A and b A = 0.0; for (int l = 0; l < N; l++) { for (int ki = 0; ki < KList.size(); ki++) { b[l] += KList[ki][1] * Fk[ki] * cnk(l, ki); for (int n = 0; n < N; n++) A(l, n) += KList[ki][1] * cnk(l, ki) * cnk(n, ki); } } //Reduce for constraints int M = N; for (int i = 0; i < N; i++) if (!adjust[i]) M--; //The c is for "constrained" Matrix<mRealType> Ac; Ac.resize(M, M); std::vector<mRealType> bc(M, 0.0), tc(M, 0.0); //Build constrained Ac and bc int j = 0; for (int col = 0; col < N; col++) { if (adjust[col]) { // Copy column a A to Ac int i = 0; for (int row = 0; row < N; row++) if (adjust[row]) { Ac(i, j) = A(row, col); i++; } j++; } else { // Otherwise, subtract t(col)*A(:,col) from bc for (int row = 0; row < N; row++) b[row] -= A(row, col) * t[col]; } } j = 0; for (int row = 0; row < N; row++) if (adjust[row]) { bc[j] = b[row]; j++; } // Do SVD: // ------- // Matrix<mRealType> U(M, M), V(M, M); // std::vector<mRealType> S(M), Sinv(M); // SVdecomp(Ac, U, S, V); //////////////////////////////// int m = Ac.rows(); int n = Ac.cols(); Matrix<mRealType> Atrans(n, m); Matrix<mRealType> U, V; U.resize(std::min(m, n), m); V.resize(n, std::min(m, n)); std::vector<mRealType> S, Sinv; 
S.resize(std::min(n, m)); //do the transpose for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) Atrans(j, i) = Ac(i, j); } char JOBU = 'S'; char JOBVT = 'S'; int LDA = m; int LDU = m; int LDVT = std::min(m, n); int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n)); std::vector<mRealType> WORK(LWORK); int INFO; LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); int ur = U.rows(); int uc = U.cols(); Matrix<mRealType> Utrans(uc, ur); for (int i = 0; i < ur; i++) { for (int j = 0; j < uc; j++) Utrans(j, i) = U(i, j); } U.resize(uc, ur); U = Utrans; ////////////////////////////////// // Zero out near-singular values mRealType Smax = S[0]; for (int i = 1; i < M; i++) Smax = std::max(S[i], Smax); for (int i = 0; i < M; i++) if (S[i] < 0.0) std::cout << "negative singlar value.\n"; // perr << "Smax = " << Smax << std::endl; Sinv.resize(S.size()); for (int i = 0; i < M; i++) Sinv[i] = (S[i] < (tolerance * Smax)) ? 
0.0 : (1.0 / S[i]); int numSingular = 0; for (int i = 0; i < Sinv.size(); i++) if (Sinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in breakup.\n"; // Compute t_n, removing singular values for (int i = 0; i < M; i++) { mRealType coef = 0.0; for (int j = 0; j < M; j++) coef += U(j, i) * bc[j]; coef *= Sinv[i]; for (int k = 0; k < M; k++) tc[k] += coef * V(k, i); } // Now copy tc values into t j = 0; for (int i = 0; i < N; i++) if (adjust[i]) { t[i] = tc[j]; j++; } // Calculate chi-squared mRealType Yk, chi2; chi2 = 0.0; for (int ki = 0; ki < KList.size(); ki++) { Yk = Fk[ki]; for (int n = 0; n < N; n++) { Yk -= cnk(n, ki) * t[n]; } chi2 += KList[ki][1] * Yk * Yk; } return (chi2); } template<class BreakupBasis> typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoGradBreakup(mRealType* Fk, mRealType* t, mRealType* adjust) { const mRealType tolerance = std::numeric_limits<mRealType>::epsilon(); //t and adjust must be allocated up to Basis.NumBasisElem(); //Fk must be allocated and filled up to KList.size(); // assert(t.size()==adjust.size()); // assert(t.size()==Basis.NumBasisElem()); Matrix<mRealType> A; std::vector<mRealType> b; Matrix<mRealType> cnk; int N = Basis.NumBasisElem(); //t.size(); A.resize(N, N); b.resize(N, 0.0); cnk.resize(N, KList.size()); //Fill in cnk. 
for (int n = 0; n < N; n++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k = KList[ki][0]; cnk(n, ki) = Basis.c(n, k); } } //Fill in A and b A = 0.0; for (int l = 0; l < N; l++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k2 = KList[ki][0] * KList[ki][0]; b[l] += k2 * KList[ki][1] * Fk[ki] * cnk(l, ki); for (int n = 0; n < N; n++) A(l, n) += k2 * KList[ki][1] * cnk(l, ki) * cnk(n, ki); } } //Reduce for constraints int M = N; for (int i = 0; i < N; i++) if (!adjust[i]) M--; //The c is for "constrained" Matrix<mRealType> Ac; Ac.resize(M, M); std::vector<mRealType> bc(M, 0.0), tc(M, 0.0); //Build constrained Ac and bc int j = 0; for (int col = 0; col < N; col++) { if (adjust[col]) { // Copy column a A to Ac int i = 0; for (int row = 0; row < N; row++) if (adjust[row]) { Ac(i, j) = A(row, col); i++; } j++; } else { // Otherwise, subtract t(col)*A(:,col) from bc for (int row = 0; row < N; row++) b[row] -= A(row, col) * t[col]; } } j = 0; for (int row = 0; row < N; row++) if (adjust[row]) { bc[j] = b[row]; j++; } // Do SVD: // ------- // Matrix<mRealType> U(M, M), V(M, M); // std::vector<mRealType> S(M), Sinv(M); // SVdecomp(Ac, U, S, V); //////////////////////////////// int m = Ac.rows(); int n = Ac.cols(); Matrix<mRealType> Atrans(n, m); Matrix<mRealType> U, V; U.resize(std::min(m, n), m); V.resize(n, std::min(m, n)); std::vector<mRealType> S, Sinv; S.resize(std::min(n, m)); //do the transpose for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) Atrans(j, i) = Ac(i, j); } char JOBU = 'S'; char JOBVT = 'S'; int LDA = m; int LDU = m; int LDVT = std::min(m, n); int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n)); std::vector<mRealType> WORK(LWORK); int INFO; LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); int ur = U.rows(); int uc = U.cols(); Matrix<mRealType> Utrans(uc, ur); for (int i = 0; i < ur; i++) { for (int j = 
0; j < uc; j++) Utrans(j, i) = U(i, j); } U.resize(uc, ur); U = Utrans; ////////////////////////////////// // Zero out near-singular values mRealType Smax = S[0]; for (int i = 1; i < M; i++) Smax = std::max(S[i], Smax); for (int i = 0; i < M; i++) if (S[i] < 0.0) std::cout << "negative singlar value.\n"; // perr << "Smax = " << Smax << std::endl; Sinv.resize(S.size()); for (int i = 0; i < M; i++) Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]); int numSingular = 0; for (int i = 0; i < Sinv.size(); i++) if (Sinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in breakup.\n"; // Compute t_n, removing singular values for (int i = 0; i < M; i++) { mRealType coef = 0.0; for (int j = 0; j < M; j++) coef += U(j, i) * bc[j]; coef *= Sinv[i]; for (int k = 0; k < M; k++) tc[k] += coef * V(k, i); } // Now copy tc values into t j = 0; for (int i = 0; i < N; i++) if (adjust[i]) { t[i] = tc[j]; j++; } // Calculate chi-squared mRealType Yk, chi2; chi2 = 0.0; for (int ki = 0; ki < KList.size(); ki++) { Yk = Fk[ki]; for (int n = 0; n < N; n++) { Yk -= cnk(n, ki) * t[n]; } chi2 += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk * Yk; } return (chi2); } template<class BreakupBasis> typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoStrainBreakup(mRealType* Fk, mRealType* dFk, mRealType* t, mRealType* adjust) { const mRealType tolerance = std::numeric_limits<mRealType>::epsilon(); //t and adjust must be allocated up to Basis.NumBasisElem(); //Fk must be allocated and filled up to KList.size(); // assert(t.size()==adjust.size()); // assert(t.size()==Basis.NumBasisElem()); Matrix<mRealType> A; std::vector<mRealType> b; Matrix<mRealType> dcnk; int N = Basis.NumBasisElem(); //t.size(); A.resize(N, N); b.resize(N, 0.0); dcnk.resize(N, KList.size()); //Fill in cnk. 
for (int n = 0; n < N; n++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k = KList[ki][0]; dcnk(n, ki) = Basis.dc_dk(n, k); //-Basis.c(n,k); } } //Fill in A and b A = 0.0; for (int l = 0; l < N; l++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k2 = KList[ki][0] * KList[ki][0]; // b[l] += k2*KList[ki][1]*(dFk[ki]-Fk[ki]) * dcnk(l, ki); b[l] += k2 * KList[ki][1] * (dFk[ki]) * dcnk(l, ki); for (int n = 0; n < N; n++) A(l, n) += k2 * KList[ki][1] * dcnk(l, ki) * dcnk(n, ki); } } //Reduce for constraints int M = N; for (int i = 0; i < N; i++) if (!adjust[i]) M--; //The c is for "constrained" Matrix<mRealType> Ac; Ac.resize(M, M); std::vector<mRealType> bc(M, 0.0), tc(M, 0.0); //Build constrained Ac and bc int j = 0; for (int col = 0; col < N; col++) { if (adjust[col]) { // Copy column a A to Ac int i = 0; for (int row = 0; row < N; row++) if (adjust[row]) { Ac(i, j) = A(row, col); i++; } j++; } else { // Otherwise, subtract t(col)*A(:,col) from bc for (int row = 0; row < N; row++) b[row] -= A(row, col) * t[col]; } } j = 0; for (int row = 0; row < N; row++) if (adjust[row]) { bc[j] = b[row]; j++; } // Do SVD: // ------- // Matrix<mRealType> U(M, M), V(M, M); // std::vector<mRealType> S(M), Sinv(M); // SVdecomp(Ac, U, S, V); //////////////////////////////// int m = Ac.rows(); int n = Ac.cols(); Matrix<mRealType> Atrans(n, m); Matrix<mRealType> U, V; U.resize(std::min(m, n), m); V.resize(n, std::min(m, n)); std::vector<mRealType> S, Sinv; S.resize(std::min(n, m)); //do the transpose for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) Atrans(j, i) = Ac(i, j); } char JOBU = 'S'; char JOBVT = 'S'; int LDA = m; int LDU = m; int LDVT = std::min(m, n); int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n)); std::vector<mRealType> WORK(LWORK); int INFO; LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); int ur = U.rows(); int uc = 
U.cols(); Matrix<mRealType> Utrans(uc, ur); for (int i = 0; i < ur; i++) { for (int j = 0; j < uc; j++) Utrans(j, i) = U(i, j); } U.resize(uc, ur); U = Utrans; ////////////////////////////////// // Zero out near-singular values mRealType Smax = S[0]; for (int i = 1; i < M; i++) Smax = std::max(S[i], Smax); for (int i = 0; i < M; i++) if (S[i] < 0.0) std::cout << "negative singlar value.\n"; // perr << "Smax = " << Smax << std::endl; Sinv.resize(S.size()); for (int i = 0; i < M; i++) Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]); int numSingular = 0; for (int i = 0; i < Sinv.size(); i++) if (Sinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in breakup.\n"; // Compute t_n, removing singular values for (int i = 0; i < M; i++) { mRealType coef = 0.0; for (int j = 0; j < M; j++) coef += U(j, i) * bc[j]; coef *= Sinv[i]; for (int k = 0; k < M; k++) tc[k] += coef * V(k, i); } // Now copy tc values into t j = 0; for (int i = 0; i < N; i++) if (adjust[i]) { t[i] = tc[j]; j++; } // Calculate chi-squared mRealType Yk, chi2; chi2 = 0.0; for (int ki = 0; ki < KList.size(); ki++) { Yk = dFk[ki]; //-Fk[ki]; for (int n = 0; n < N; n++) { Yk -= dcnk(n, ki) * t[n]; } chi2 += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk * Yk; } return (chi2); } template<class BreakupBasis> void LRBreakup<BreakupBasis>::DoAllBreakup(mRealType* chisqrlist, mRealType* Fk, mRealType* dFk, mRealType* t, mRealType* gt, mRealType* dt, mRealType* adjust) { const mRealType tolerance = std::numeric_limits<mRealType>::epsilon(); //t and adjust must be allocated up to Basis.NumBasisElem(); //Fk must be allocated and filled up to KList.size(); // assert(t.size()==adjust.size()); // assert(t.size()==Basis.NumBasisElem()); Matrix<mRealType> A; Matrix<mRealType> Af; Matrix<mRealType> As; std::vector<mRealType> b; std::vector<mRealType> bf; std::vector<mRealType> bs; Matrix<mRealType> cnk; Matrix<mRealType> dcnk; int N = 
Basis.NumBasisElem(); //t.size(); A.resize(N, N); Af.resize(N, N); As.resize(N, N); b.resize(N, 0.0); bf.resize(N, 0.0); bs.resize(N, 0.0); cnk.resize(N, KList.size()); dcnk.resize(N, KList.size()); //Fill in cnk. for (int n = 0; n < N; n++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k = KList[ki][0]; cnk(n, ki) = Basis.c(n, k); dcnk(n, ki) = Basis.dc_dk(n, k); //-Basis.c(n,k); } } //Fill in A and b A = 0.0; Af = 0.0; As = 0.0; for (int l = 0; l < N; l++) { for (int ki = 0; ki < KList.size(); ki++) { mRealType k2 = KList[ki][0] * KList[ki][0]; mRealType temp = KList[ki][1] * Fk[ki] * cnk(l, ki); // b[l] += k2*KList[ki][1]*(dFk[ki]-Fk[ki]) * dcnk(l, ki); b[l] += temp; bf[l] += k2 * temp; bs[l] += k2 * KList[ki][1] * dFk[ki] * dcnk(l, ki); for (int n = 0; n < N; n++) { temp = KList[ki][1] * cnk(l, ki) * cnk(n, ki); A(l, n) += temp; Af(l, n) += k2 * temp; As(l, n) += k2 * KList[ki][1] * dcnk(l, ki) * dcnk(n, ki); } } } //************************************ //FOR POTENTIAL AND FORCE //************************************ //Reduce for constraints int M = N; for (int i = 0; i < N; i++) if (!adjust[i]) M--; //The c is for "constrained" Matrix<mRealType> Ac; Matrix<mRealType> Afc; Matrix<mRealType> Asc; Ac.resize(M, M); Afc.resize(M, M); Asc.resize(M, M); std::vector<mRealType> bc(M, 0.0), bfc(M, 0.0), bsc(M, 0.0), tc(M, 0.0), tfc(M, 0.0), tsc(M, 0.0); //Build constrained Ac and bc int j = 0; for (int col = 0; col < N; col++) { if (adjust[col]) { // Copy column a A to Ac int i = 0; for (int row = 0; row < N; row++) if (adjust[row]) { Ac(i, j) = A(row, col); Afc(i, j) = Af(row, col); Asc(i, j) = As(row, col); i++; } j++; } else { // Otherwise, subtract t(col)*A(:,col) from bc for (int row = 0; row < N; row++) { b[row] -= A(row, col) * t[col]; bf[row] -= Af(row, col) * gt[col]; bs[row] -= As(row, col) * dt[col]; } } } j = 0; for (int row = 0; row < N; row++) if (adjust[row]) { bc[j] = b[row]; bfc[j] = bf[row]; bsc[j] = bs[row]; j++; } // Do SVD: // ------- // 
Matrix<mRealType> U(M, M), V(M, M); // std::vector<mRealType> S(M), Sinv(M); // SVdecomp(Ac, U, S, V); //////////////////////////////// int m = Ac.rows(); int n = Ac.cols(); Matrix<mRealType> A_trans(n, m); Matrix<mRealType> Af_trans(n, m); Matrix<mRealType> As_trans(n, m); Matrix<mRealType> U, V; Matrix<mRealType> Uf, Vf; Matrix<mRealType> Us, Vs; U.resize(std::min(m, n), m); V.resize(n, std::min(m, n)); Uf.resize(std::min(m, n), m); Vf.resize(n, std::min(m, n)); Us.resize(std::min(m, n), m); Vs.resize(n, std::min(m, n)); std::vector<mRealType> S, Sinv; S.resize(std::min(n, m)); std::vector<mRealType> Sf, Sfinv; Sf.resize(std::min(n, m)); std::vector<mRealType> Ss, Ssinv; Ss.resize(std::min(n, m)); //do the transpose for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { A_trans(j, i) = Ac(i, j); Af_trans(j, i) = Afc(i, j); As_trans(j, i) = Asc(i, j); } } char JOBU = 'S'; char JOBVT = 'S'; int LDA = m; int LDU = m; int LDVT = std::min(m, n); int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n)); std::vector<mRealType> WORK(LWORK); int INFO; LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, A_trans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Af_trans.data(), &LDA, &Sf[0], Uf.data(), &LDU, Vf.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, As_trans.data(), &LDA, &Ss[0], Us.data(), &LDU, Vs.data(), &LDVT, &WORK[0], &LWORK, &INFO); assert(INFO == 0); int ur = U.rows(); int uc = U.cols(); Matrix<mRealType> U_trans(uc, ur); Matrix<mRealType> Uf_trans(uc, ur); Matrix<mRealType> Us_trans(uc, ur); for (int i = 0; i < ur; i++) { for (int j = 0; j < uc; j++) { U_trans(j, i) = U(i, j); Uf_trans(j, i) = Uf(i, j); Us_trans(j, i) = Us(i, j); } } U.resize(uc, ur); U = U_trans; Uf.resize(uc, ur); Uf = Uf_trans; Us.resize(uc, ur); Us = Us_trans; ////////////////////////////////// // Zero out near-singular values //First, 
do normal breakup. mRealType Smax = S[0]; for (int i = 1; i < M; i++) Smax = std::max(S[i], Smax); for (int i = 0; i < M; i++) if (S[i] < 0.0) std::cout << "negative singlar value.\n"; // perr << "Smax = " << Smax << std::endl; Sinv.resize(S.size()); for (int i = 0; i < M; i++) Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]); int numSingular = 0; for (int i = 0; i < Sinv.size(); i++) if (Sinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in energy breakup.\n"; // Compute t_n, removing singular values //Second, do force. Smax = Sf[0]; for (int i = 1; i < M; i++) Smax = std::max(Sf[i], Smax); for (int i = 0; i < M; i++) if (Sf[i] < 0.0) std::cout << "negative singlar value.\n"; // perr << "Smax = " << Smax << std::endl; Sfinv.resize(Sf.size()); for (int i = 0; i < M; i++) Sfinv[i] = (Sf[i] < (tolerance * Smax)) ? 0.0 : (1.0 / Sf[i]); numSingular = 0; for (int i = 0; i < Sfinv.size(); i++) if (Sfinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in force breakup.\n"; // Compute t_n, removing singular values //First, do strain. Smax = Ss[0]; for (int i = 1; i < M; i++) Smax = std::max(Ss[i], Smax); for (int i = 0; i < M; i++) if (Ss[i] < 0.0) std::cout << "negative singlar value.\n"; // perr << "Smax = " << Smax << std::endl; Ssinv.resize(Ss.size()); for (int i = 0; i < M; i++) Ssinv[i] = (Ss[i] < (tolerance * Smax)) ? 
0.0 : (1.0 / Ss[i]); numSingular = 0; for (int i = 0; i < Ssinv.size(); i++) if (Ssinv[i] == 0.0) numSingular++; if (numSingular > 0) std::cout << "There were " << numSingular << " singular values in strain breakup.\n"; // Compute t_n, removing singular values for (int i = 0; i < M; i++) { mRealType coef = 0.0; mRealType coef_f = 0.0; mRealType coef_s = 0.0; for (int j = 0; j < M; j++) { coef += U(j, i) * bc[j]; coef_f += Uf(j, i) * bfc[j]; coef_s += Us(j, i) * bsc[j]; } coef *= Sinv[i]; coef_f *= Sfinv[i]; coef_s *= Ssinv[i]; for (int k = 0; k < M; k++) { tc[k] += coef * V(k, i); tfc[k] += coef_f * Vf(k, i); tsc[k] += coef_s * Vs(k, i); } } // Now copy tc values into t j = 0; for (int i = 0; i < N; i++) if (adjust[i]) { t[i] = tc[j]; gt[i] = tfc[j]; dt[i] = tsc[j]; j++; } // Calculate chi-squared mRealType Yk(0.0), chi2(0.0); mRealType Yk_f(0.0), chi2_f(0.0); mRealType Yk_s(0.0), chi2_s(0.0); for (int ki = 0; ki < KList.size(); ki++) { Yk = Fk[ki]; //-Fk[ki]; Yk_f = Fk[ki]; Yk_s = dFk[ki]; for (int n = 0; n < N; n++) { Yk -= cnk(n, ki) * t[n]; Yk_f -= cnk(n, ki) * gt[n]; Yk_s -= dcnk(n, ki) * dt[n]; } chi2 += KList[ki][1] * Yk * Yk; chi2_f += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk_f * Yk_f; chi2_s += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk_s * Yk_s; } // std::vector<mRealType> chisqrtmp(3); chisqrlist[0] = chi2; chisqrlist[1] = chi2_f; chisqrlist[2] = chi2_s; //chisqrlist=chisqrtmp; } } // namespace qmcplusplus #endif
parallel_utils_openmp.h
#pragma once

#include <omp.h>

#include <array>
#include <vector>
#include <type_traits>
#include <atomic>
#include <memory>

//! Helper utilities for running sequences of map/fold operations on an OpenMP
//! thread team, with optional per-thread local data and either static or
//! dynamic (work-stealing) scheduling.
class ParallelUtilsOpenMP
{
public:
  //! Integer ceiling division: smallest q with q*b >= a (for positive a, b).
  template<typename Int>
  static Int ceilDivision(Int a, Int b)
  {
    return (a + b - 1) / b;
  }

  //! Splits [0, numElements) into numThreads contiguous ranges of (almost)
  //! equal size for static scheduling.
  class StaticScheduler
  {
    size_t m_NumElements;
    int m_NumThreads;
    size_t m_NumElementsPerThread; // ceil(numElements / numThreads)

  public:
    StaticScheduler(size_t numElements, int numThreads = omp_get_max_threads())
      : m_NumElements(numElements), m_NumThreads(numThreads)
    {
      m_NumElementsPerThread = ceilDivision(numElements, (size_t)numThreads);
    }

    //! Retrieves the range of indices for the given thread index.
    //! Returns false when the thread's range is empty (more threads than work).
    bool getThreadRange(int threadIndex, size_t* start, size_t* end, size_t offset = 0) const
    {
      *start = offset + std::min((size_t)threadIndex * m_NumElementsPerThread, m_NumElements);
      *end = offset + std::min((size_t)(threadIndex + 1) * m_NumElementsPerThread, m_NumElements);
      return (*end > *start);
    }

    //! Applies mapper(j) to every index j in the given thread's static range.
    template<class F>
    void mapThreadLocal(int threadIndex, const F& mapper)
    {
      size_t localStart, localEnd;
      if (!getThreadRange(threadIndex, &localStart, &localEnd))
        return;
      for (size_t j = localStart; j < localEnd; j++)
        mapper(j);
    }

    int numThreads() const { return m_NumThreads; }
  };

  //! Abstract base for one operation in a Team's pipeline; doWork is invoked
  //! once per thread with that thread's local data.
  template<class PerThreadData>
  class PerThreadOperation
  {
  public:
    virtual ~PerThreadOperation() {}
    virtual int doWork(int numThreads, int ownThreadNum, PerThreadData& perThreadDat) = 0;
  };

  //! Scheduling options for map operations.
  struct ScheduleHints
  {
    bool m_UseDynamicScheduling = false;
    size_t m_DynamicSchedulingChunkSize = 1; // elements handed out per job

    ScheduleHints(bool useDynamicScheduling = false, size_t dynamicSchedulingChunkSize = 1)
      : m_UseDynamicScheduling(useDynamicScheduling), m_DynamicSchedulingChunkSize(dynamicSchedulingChunkSize)
    {}

    //! Convenience factory for dynamic scheduling with the given chunk size.
    static ScheduleHints dynamic(size_t dynamicSchedulingChunkSize = 1)
    {
      return ScheduleHints(true, dynamicSchedulingChunkSize);
    }
  };

  //! Map operation over [0, numElements) that passes the thread-local data to
  //! the mapper.  Dynamic scheduling uses one atomic job counter per thread;
  //! a thread that exhausts its own range takes jobs from neighboring
  //! threads' ranges (nearest neighbors first, alternating left/right).
  template<class PerThreadData, class Mapper>
  class MapWithLocalDataOp : public PerThreadOperation<PerThreadData>
  {
  protected:
    size_t m_NumElements;
    Mapper m_Mapper;
    std::vector<std::atomic<size_t>> m_JobCounters; // empty unless dynamic scheduling
    ScheduleHints m_ScheduleHints;

  public:
    virtual ~MapWithLocalDataOp() {}

    MapWithLocalDataOp(size_t numElements,
                       const Mapper& mapper,
                       size_t numThreads,
                       const ScheduleHints& scheduleHints = ScheduleHints())
      : m_NumElements(numElements),
        m_Mapper(mapper),
        m_JobCounters(scheduleHints.m_UseDynamicScheduling ? numThreads : 0),
        m_ScheduleHints(scheduleHints)
    {
      for (auto& counter : m_JobCounters)
        counter.store(0);
    }

    virtual int doWork(int numThreads, int ownThreadNum, PerThreadData& perThreadData) override
    {
      if (numThreads == 1)
      {
        // Serial fast path: no scheduling machinery needed.
        for (size_t i = 0; i < m_NumElements; i++)
          m_Mapper(i, perThreadData);
        return 0;
      }
      if (!m_ScheduleHints.m_UseDynamicScheduling ||
          m_NumElements <= (size_t)numThreads) // Dynamic scheduling makes no sense if there is less data than threads.
      {
        StaticScheduler scheduleHelper(m_NumElements, numThreads);
#pragma omp for
        for (int i = 0; i < scheduleHelper.numThreads(); i++)
        {
          scheduleHelper.mapThreadLocal(i, [&](size_t j) { m_Mapper(j, perThreadData); });
        }
      }
      else
      {
        // Dynamic scheduling: the element range is cut into chunks; each
        // thread owns a static chunk range but serves it through an atomic
        // counter, so idle threads can steal leftover chunks.
        size_t numChunks = ceilDivision(m_NumElements, m_ScheduleHints.m_DynamicSchedulingChunkSize);
        StaticScheduler scheduleHelper(numChunks, numThreads);
        size_t threadRangeStart, threadRangeEnd;
        scheduleHelper.getThreadRange(ownThreadNum, &threadRangeStart, &threadRangeEnd);
        int activeThreadNum = ownThreadNum;
        int threadOffset = 1;
        bool skipLeft = false;
        // Advances activeThreadNum to the next neighbor (left/right alternating,
        // increasing distance); returns false when all threads were visited.
        auto getNextNeighborThread = [&]() {
          bool leftAvailable = ownThreadNum - threadOffset >= 0;
          bool rightAvailable = ownThreadNum + threadOffset < numThreads;
          if (!leftAvailable && !rightAvailable)
            return false;
          if (!skipLeft && leftAvailable)
          {
            activeThreadNum = ownThreadNum - threadOffset;
            if (!rightAvailable)
              threadOffset++;
            else
              skipLeft = true;
            return true;
          }
          activeThreadNum = ownThreadNum + threadOffset++;
          skipLeft = false;
          return true;
        };
        while (true)
        {
          // Claim the next job in the active thread's range (atomic increment).
          size_t jobId = threadRangeStart + m_JobCounters[activeThreadNum]++;
          if (jobId >= threadRangeEnd)
          {
            // Active thread's range exhausted -- move on to a neighbor's range.
            if (!getNextNeighborThread())
              break;
            scheduleHelper.getThreadRange(activeThreadNum, &threadRangeStart, &threadRangeEnd);
            continue;
          }
          if (m_ScheduleHints.m_DynamicSchedulingChunkSize == 1)
            m_Mapper(jobId, perThreadData);
          else
          {
            // One job covers a chunk of consecutive elements.
            size_t elementStart = jobId * m_ScheduleHints.m_DynamicSchedulingChunkSize;
            size_t elementEnd = std::min(m_NumElements, elementStart + m_ScheduleHints.m_DynamicSchedulingChunkSize);
            for (size_t index = elementStart; index < elementEnd; index++)
              m_Mapper(index, perThreadData);
          }
        }
        // The `omp for` branch has an implicit barrier; match it here so all
        // threads leave this operation together.
#pragma omp barrier
      }
      return 0;
    }
  };

  //! Calls a fold functor once per thread with that thread's local data,
  //! either in thread order (ordered) or mutually exclusive (critical).
  template<class PerThreadData, class FoldFunc>
  class FoldOp : public PerThreadOperation<PerThreadData>
  {
    FoldFunc m_FoldFunc;
    bool m_Ordered = false;

  public:
    virtual int doWork(int numThreads, int, PerThreadData& perThreadData) override
    {
      if (numThreads == 1)
      {
        m_FoldFunc(perThreadData);
        return 0;
      }
      else if (m_Ordered)
      {
        // schedule(static,1) + ordered => each thread runs the fold exactly
        // once, in increasing thread order.
#pragma omp for ordered schedule(static, 1)
        for (int i = 0; i < numThreads; i++)
        {
#pragma omp ordered
          m_FoldFunc(perThreadData);
        }
      }
      else
      {
#pragma omp critical
        m_FoldFunc(perThreadData);
      }
      return 0;
    }

  public:
    FoldOp(const FoldFunc& func, bool ordered = false) : m_FoldFunc(func), m_Ordered(ordered) {}
  };

  //! Deduces the thread-local data type from the createData functor.
  template<class CreateData>
  using ReturnOfCreateData = typename std::result_of<CreateData(int)>::type;

  //! A pipeline of operations executed by one OpenMP thread team.  Operations
  //! are queued via map/fold/customOp and run by execute(); the destructor
  //! triggers execution if execute() was never called explicitly.
  template<class CreateThreadLocalDataFunc>
  class Team
  {
    using ThreadLocalData = ReturnOfCreateData<CreateThreadLocalDataFunc>;

    CreateThreadLocalDataFunc m_CreateThreadLocalDataFunc;
    int m_NumThreads;
    std::vector<std::unique_ptr<PerThreadOperation<ThreadLocalData>>> m_List;
    bool m_Executed = false;

  public:
    Team(const CreateThreadLocalDataFunc& func, int numThreads)
      : m_CreateThreadLocalDataFunc(func), m_NumThreads(numThreads)
    {}

    ~Team()
    {
      if (!m_Executed)
        execute();
    }

    //! Performs a parallel map operation on the range [0, numElements). The mapper should take an index and the thread-local data as argument.
    template<class Mapper>
    Team* mapWithLocalData(size_t numElements, const Mapper& mapper, const ScheduleHints& scheduleHints = ScheduleHints())
    {
      m_List.emplace_back(new MapWithLocalDataOp<ThreadLocalData, Mapper>(numElements, mapper, m_NumThreads, scheduleHints));
      return this;
    }

    //! Performs a parallel map operation on the range [0, numElements). The mapper should take an index as argument.
    template<class Mapper>
    Team* map(size_t numElements, const Mapper& mapper, const ScheduleHints& scheduleHints = ScheduleHints())
    {
      return mapWithLocalData(numElements, [mapper](size_t i, ThreadLocalData&) { mapper(i); }, scheduleHints);
    }

    //! Calls the given lambda synchronized for each thread, passing the thread-local data. Intended for fold / reduce operations.
    template<class FoldFunc>
    Team* fold(const FoldFunc& foldFunc, bool ordered = false)
    {
      m_List.emplace_back(new FoldOp<ThreadLocalData, FoldFunc>(foldFunc, ordered));
      return this;
    }

    //! Adds a user-defined operation to the OpList. The CustomAdder class must implement a static function addOp(OpList*, params...).
    template<typename CustomAdder, typename... Params>
    Team* customOp(const Params& ... params)
    {
      CustomAdder::addOp(this, params...);
      return this;
    }

    //! Runs all queued operations; per-thread data is created once per thread
    //! and handed to every operation in turn.  An operation's return value is
    //! added to the loop index, allowing it to skip the following operations
    //! (all operations defined here return 0).
    void execute()
    {
      if (m_NumThreads == 1)
      {
        // Single threaded version.
        auto localData = m_CreateThreadLocalDataFunc(0);
        for (size_t i = 0; i < m_List.size(); i++)
          i += m_List[i]->doWork(1, 0, localData);
      }
      else
      {
        // Multithreaded version.
#pragma omp parallel num_threads(m_NumThreads)
        {
          int ownThreadNum = omp_get_thread_num();
          auto localData = m_CreateThreadLocalDataFunc(ownThreadNum);
          for (size_t i = 0; i < m_List.size(); i++)
          {
            i += m_List[i]->doWork(m_NumThreads, ownThreadNum, localData);
          }
        }
      }
      m_Executed = true;
    }
  };

  //! Default thread-local-data factory: produces a dummy int per thread.
  struct NoThreadLocalData
  {
    int operator()(int) const { return 0; }
  };

  //! @param createThreadLocalDataFunc is a function that initializes and returns the thread-local data passed to the mappers. The thread index is passed as an int parameter to the createData function.
  //! @param numThreads is the number of threads that should be used for the computation.
  template<class CreateThreadLocalDataFunc = NoThreadLocalData>
  static std::unique_ptr<Team<CreateThreadLocalDataFunc>> createTeam(const CreateThreadLocalDataFunc& createThreadLocalDataFunc = NoThreadLocalData(),
                                                                     int numThreads = omp_get_max_threads())
  {
    return std::make_unique<Team<CreateThreadLocalDataFunc>>(createThreadLocalDataFunc, numThreads);
  }
};
pt.c
/* Handle parameterized types (templates) for GNU -*- C++ -*-. Copyright (C) 1992-2018 Free Software Foundation, Inc. Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing. Rewritten by Jason Merrill (jason@cygnus.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Known bugs or deficiencies include: all methods must be provided in header files; can't use a source file that contains only the method templates and "just win". */ #include "config.h" #include "system.h" #include "coretypes.h" #include "cp-tree.h" #include "timevar.h" #include "stringpool.h" #include "varasm.h" #include "attribs.h" #include "stor-layout.h" #include "intl.h" #include "c-family/c-objc.h" #include "cp-objcp-common.h" #include "toplev.h" #include "tree-iterator.h" #include "type-utils.h" #include "gimplify.h" #include "gcc-rich-location.h" #include "selftest.h" /* The type of functions taking a tree, and some additional data, and returning an int. */ typedef int (*tree_fn_t) (tree, void*); /* The PENDING_TEMPLATES is a TREE_LIST of templates whose instantiations have been deferred, either because their definitions were not yet available, or because we were putting off doing the work. 
*/
/* One deferred template instantiation, chained through NEXT.  */
struct GTY ((chain_next ("%h.next"))) pending_template
{
  struct pending_template *next;
  struct tinst_level *tinst;
};

/* Head and tail of the queue of pending instantiations.  */
static GTY(()) struct pending_template *pending_templates;
static GTY(()) struct pending_template *last_pending_template;

int processing_template_parmlist;
static int template_header_count;

static GTY(()) tree saved_trees;
static vec<int> inline_parm_levels;

static GTY(()) struct tinst_level *current_tinst_level;

static GTY(()) tree saved_access_scope;

/* Live only within one (recursive) call to tsubst_expr.  We use
   this to pass the statement expression node from the STMT_EXPR
   to the EXPR_STMT that is its result.  */
static tree cur_stmt_expr;

// -------------------------------------------------------------------------- //
// Local Specialization Stack
//
// Implementation of the RAII helper for creating new local
// specializations.

/* Install a fresh local-specialization map (lss_blank, or when no map
   exists yet) or a copy of the current one (lss_copy), remembering the
   previous map for restoration in the destructor.  */
local_specialization_stack::local_specialization_stack (lss_policy policy)
  : saved (local_specializations)
{
  if (policy == lss_blank || !saved)
    local_specializations = new hash_map<tree, tree>;
  else
    local_specializations = new hash_map<tree, tree>(*saved);
}

/* Drop the map installed by the constructor and restore the saved one.  */
local_specialization_stack::~local_specialization_stack ()
{
  delete local_specializations;
  local_specializations = saved;
}

/* True if we've recursed into fn_type_unification too many times.  */
static bool excessive_deduction_depth;

/* One entry in the specialization hash tables below.  */
struct GTY((for_user)) spec_entry
{
  tree tmpl;
  tree args;
  tree spec;
};

struct spec_hasher : ggc_ptr_hash<spec_entry>
{
  static hashval_t hash (spec_entry *);
  static bool equal (spec_entry *, spec_entry *);
};

static GTY (()) hash_table<spec_hasher> *decl_specializations;

static GTY (()) hash_table<spec_hasher> *type_specializations;

/* Contains canonical template parameter types. The vector is indexed by
   the TEMPLATE_TYPE_IDX of the template parameter. Each element is a
   TREE_LIST, whose TREE_VALUEs contain the canonical template parameters
   of various types and levels.
*/ static GTY(()) vec<tree, va_gc> *canonical_template_parms; #define UNIFY_ALLOW_NONE 0 #define UNIFY_ALLOW_MORE_CV_QUAL 1 #define UNIFY_ALLOW_LESS_CV_QUAL 2 #define UNIFY_ALLOW_DERIVED 4 #define UNIFY_ALLOW_INTEGER 8 #define UNIFY_ALLOW_OUTER_LEVEL 16 #define UNIFY_ALLOW_OUTER_MORE_CV_QUAL 32 #define UNIFY_ALLOW_OUTER_LESS_CV_QUAL 64 enum template_base_result { tbr_incomplete_type, tbr_ambiguous_baseclass, tbr_success }; static void push_access_scope (tree); static void pop_access_scope (tree); static bool resolve_overloaded_unification (tree, tree, tree, tree, unification_kind_t, int, bool); static int try_one_overload (tree, tree, tree, tree, tree, unification_kind_t, int, bool, bool); static int unify (tree, tree, tree, tree, int, bool); static void add_pending_template (tree); static tree reopen_tinst_level (struct tinst_level *); static tree tsubst_initializer_list (tree, tree); static tree get_partial_spec_bindings (tree, tree, tree); static tree coerce_template_parms (tree, tree, tree, tsubst_flags_t, bool, bool); static tree coerce_innermost_template_parms (tree, tree, tree, tsubst_flags_t, bool, bool); static void tsubst_enum (tree, tree, tree); static tree add_to_template_args (tree, tree); static tree add_outermost_template_args (tree, tree); static bool check_instantiated_args (tree, tree, tsubst_flags_t); static int maybe_adjust_types_for_deduction (unification_kind_t, tree*, tree*, tree); static int type_unification_real (tree, tree, tree, const tree *, unsigned int, int, unification_kind_t, int, vec<deferred_access_check, va_gc> **, bool); static void note_template_header (int); static tree convert_nontype_argument_function (tree, tree, tsubst_flags_t); static tree convert_nontype_argument (tree, tree, tsubst_flags_t); static tree convert_template_argument (tree, tree, tree, tsubst_flags_t, int, tree); static tree for_each_template_parm (tree, tree_fn_t, void*, hash_set<tree> *, bool, tree_fn_t = NULL); static tree expand_template_argument_pack 
(tree); static tree build_template_parm_index (int, int, int, tree, tree); static bool inline_needs_template_parms (tree, bool); static void push_inline_template_parms_recursive (tree, int); static tree reduce_template_parm_level (tree, tree, int, tree, tsubst_flags_t); static int mark_template_parm (tree, void *); static int template_parm_this_level_p (tree, void *); static tree tsubst_friend_function (tree, tree); static tree tsubst_friend_class (tree, tree); static int can_complete_type_without_circularity (tree); static tree get_bindings (tree, tree, tree, bool); static int template_decl_level (tree); static int check_cv_quals_for_unify (int, tree, tree); static void template_parm_level_and_index (tree, int*, int*); static int unify_pack_expansion (tree, tree, tree, tree, unification_kind_t, bool, bool); static tree copy_template_args (tree); static tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree); static tree tsubst_template_args (tree, tree, tsubst_flags_t, tree); static tree tsubst_template_parms (tree, tree, tsubst_flags_t); static tree most_specialized_partial_spec (tree, tsubst_flags_t); static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, tree, int); static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree); static tree tsubst_function_type (tree, tree, tsubst_flags_t, tree); static bool check_specialization_scope (void); static tree process_partial_specialization (tree); static void set_current_access_from_decl (tree); static enum template_base_result get_template_base (tree, tree, tree, tree, bool , tree *); static tree try_class_unification (tree, tree, tree, tree, bool); static int coerce_template_template_parms (tree, tree, tsubst_flags_t, tree, tree); static bool template_template_parm_bindings_ok_p (tree, tree); static void tsubst_default_arguments (tree, tsubst_flags_t); static tree for_each_template_parm_r (tree *, int *, void *); static tree copy_default_args_to_explicit_spec_1 (tree, tree); static void 
copy_default_args_to_explicit_spec (tree); static bool invalid_nontype_parm_type_p (tree, tsubst_flags_t); static bool dependent_template_arg_p (tree); static bool any_template_arguments_need_structural_equality_p (tree); static bool dependent_type_p_r (tree); static tree tsubst_copy (tree, tree, tsubst_flags_t, tree); static tree tsubst_decl (tree, tree, tsubst_flags_t); static void perform_typedefs_access_check (tree tmpl, tree targs); static void append_type_to_template_for_access_check_1 (tree, tree, tree, location_t); static tree listify (tree); static tree listify_autos (tree, tree); static tree tsubst_template_parm (tree, tree, tsubst_flags_t); static tree instantiate_alias_template (tree, tree, tsubst_flags_t); static bool complex_alias_template_p (const_tree tmpl); static tree tsubst_attributes (tree, tree, tsubst_flags_t, tree); static tree canonicalize_expr_argument (tree, tsubst_flags_t); static tree make_argument_pack (tree); static void register_parameter_specializations (tree, tree); static tree enclosing_instantiation_of (tree tctx); /* Make the current scope suitable for access checking when we are processing T. T can be FUNCTION_DECL for instantiated function template, VAR_DECL for static member variable, or TYPE_DECL for alias template (needed by instantiate_decl). */ static void push_access_scope (tree t) { gcc_assert (VAR_OR_FUNCTION_DECL_P (t) || TREE_CODE (t) == TYPE_DECL); if (DECL_FRIEND_CONTEXT (t)) push_nested_class (DECL_FRIEND_CONTEXT (t)); else if (DECL_CLASS_SCOPE_P (t)) push_nested_class (DECL_CONTEXT (t)); else push_to_top_level (); if (TREE_CODE (t) == FUNCTION_DECL) { saved_access_scope = tree_cons (NULL_TREE, current_function_decl, saved_access_scope); current_function_decl = t; } } /* Restore the scope set up by push_access_scope. T is the node we are processing. 
*/

/* Undo push_access_scope: restore CURRENT_FUNCTION_DECL from the
   SAVED_ACCESS_SCOPE stack (for functions) and pop the class or
   top-level scope that was pushed.  */
static void
pop_access_scope (tree t)
{
  if (TREE_CODE (t) == FUNCTION_DECL)
    {
      current_function_decl = TREE_VALUE (saved_access_scope);
      saved_access_scope = TREE_CHAIN (saved_access_scope);
    }

  if (DECL_FRIEND_CONTEXT (t) || DECL_CLASS_SCOPE_P (t))
    pop_nested_class ();
  else
    pop_from_top_level ();
}

/* Do any processing required when DECL (a member template
   declaration) is finished.  Returns the TEMPLATE_DECL corresponding
   to DECL, unless it is a specialization, in which case the DECL
   itself is returned.  Emits an error and returns error_mark_node for
   invalid member template declarations (e.g. a FIELD_DECL).  */

tree
finish_member_template_decl (tree decl)
{
  if (decl == error_mark_node)
    return error_mark_node;

  gcc_assert (DECL_P (decl));

  if (TREE_CODE (decl) == TYPE_DECL)
    {
      tree type;

      type = TREE_TYPE (decl);
      if (type == error_mark_node)
	return error_mark_node;
      if (MAYBE_CLASS_TYPE_P (type)
	  && CLASSTYPE_TEMPLATE_INFO (type)
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
	{
	  /* A member class template: check it and hand back its
	     TEMPLATE_DECL.  */
	  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
	  check_member_template (tmpl);
	  return tmpl;
	}
      return NULL_TREE;
    }
  else if (TREE_CODE (decl) == FIELD_DECL)
    error ("data member %qD cannot be a member template", decl);
  else if (DECL_TEMPLATE_INFO (decl))
    {
      if (!DECL_TEMPLATE_SPECIALIZATION (decl))
	{
	  check_member_template (DECL_TI_TEMPLATE (decl));
	  return DECL_TI_TEMPLATE (decl);
	}
      else
	return decl;
    }
  else
    error ("invalid member template declaration %qD", decl);

  return error_mark_node;
}

/* Create a template info node recording that TEMPLATE_ARGS were
   applied to TEMPLATE_DECL.  */

tree
build_template_info (tree template_decl, tree template_args)
{
  tree result = make_node (TEMPLATE_INFO);
  TI_TEMPLATE (result) = template_decl;
  TI_ARGS (result) = template_args;
  return result;
}

/* Return the template info node corresponding to T, whatever T is.
*/

/* Return the TEMPLATE_INFO for T — from DECL_TEMPLATE_INFO,
   TYPE_TEMPLATE_INFO, or the template-template-parm info, as
   appropriate — or NULL if T has none.  */
tree
get_template_info (const_tree t)
{
  tree tinfo = NULL_TREE;

  if (!t || t == error_mark_node)
    return NULL;

  /* Namespaces and function parameters never carry template info.  */
  if (TREE_CODE (t) == NAMESPACE_DECL
      || TREE_CODE (t) == PARM_DECL)
    return NULL;

  if (DECL_P (t) && DECL_LANG_SPECIFIC (t))
    tinfo = DECL_TEMPLATE_INFO (t);

  /* For an implicit typedef, look at the type it names instead.  */
  if (!tinfo && DECL_IMPLICIT_TYPEDEF_P (t))
    t = TREE_TYPE (t);

  if (OVERLOAD_TYPE_P (t))
    tinfo = TYPE_TEMPLATE_INFO (t);
  else if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM)
    tinfo = TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (t);

  return tinfo;
}

/* Returns the template nesting level of the indicated class TYPE.

   For example, in:
     template <class T>
     struct A
     {
       template <class U>
       struct B {};
     };

   A<T>::B<U> has depth two, while A<T> has depth one.
   Both A<T>::B<int> and A<int>::B<U> have depth one, if
   they are instantiations, not specializations.

   This function is guaranteed to return 0 if passed NULL_TREE so
   that, for example, `template_class_depth (current_class_type)' is
   always safe.  */

int
template_class_depth (tree type)
{
  int depth;

  /* Walk outward through the enclosing contexts, counting each level
     that is a primary template with dependent arguments.  */
  for (depth = 0;
       type && TREE_CODE (type) != NAMESPACE_DECL;
       )
    {
      tree tinfo = get_template_info (type);

      if (tinfo && PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo))
	  && uses_template_parms (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo))))
	++depth;

      if (DECL_P (type))
	type = CP_DECL_CONTEXT (type);
      else if (LAMBDA_TYPE_P (type))
	/* A lambda's context is its extra scope, not its class
	   context.  */
	type = LAMBDA_TYPE_EXTRA_SCOPE (type);
      else
	type = CP_TYPE_CONTEXT (type);
    }

  return depth;
}

/* Subroutine of maybe_begin_member_template_processing.
   Returns true if processing DECL needs us to push template parms.  */

static bool
inline_needs_template_parms (tree decl, bool nsdmi)
{
  if (!decl || (!nsdmi && ! DECL_TEMPLATE_INFO (decl)))
    return false;

  /* Parms are needed when the most general template has more
     parameter levels than are currently in effect.  */
  return (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (most_general_template (decl)))
	  > (processing_template_decl + DECL_TEMPLATE_SPECIALIZATION (decl)));
}

/* Subroutine of maybe_begin_member_template_processing.
   Push the template parms in PARMS, starting from LEVELS steps into the
   chain, and ending at the beginning, since template parms are listed
   innermost first.  */

static void
push_inline_template_parms_recursive (tree parmlist, int levels)
{
  tree parms = TREE_VALUE (parmlist);
  int i;

  /* Recurse first so that outer levels are pushed before inner ones.  */
  if (levels > 1)
    push_inline_template_parms_recursive (TREE_CHAIN (parmlist), levels - 1);

  ++processing_template_decl;
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 parms, current_template_parms);
  TEMPLATE_PARMS_FOR_INLINE (current_template_parms) = 1;

  begin_scope (TREE_VEC_LENGTH (parms) ? sk_template_parms : sk_template_spec,
	       NULL);
  for (i = 0; i < TREE_VEC_LENGTH (parms); ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));

      if (error_operand_p (parm))
	continue;

      gcc_assert (DECL_P (parm));

      switch (TREE_CODE (parm))
	{
	case TYPE_DECL:
	case TEMPLATE_DECL:
	  pushdecl (parm);
	  break;

	case PARM_DECL:
	  /* Push the CONST_DECL.  */
	  pushdecl (TEMPLATE_PARM_DECL (DECL_INITIAL (parm)));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
}

/* Restore the template parameter context for a member template, a
   friend template defined in a class definition, or a non-template
   member of template class.  */

void
maybe_begin_member_template_processing (tree decl)
{
  tree parms;
  int levels = 0;
  bool nsdmi = TREE_CODE (decl) == FIELD_DECL;

  if (nsdmi)
    {
      tree ctx = DECL_CONTEXT (decl);
      decl = (CLASSTYPE_TEMPLATE_INFO (ctx)
	      /* Disregard full specializations (c++/60999).  */
	      && uses_template_parms (ctx)
	      ? CLASSTYPE_TI_TEMPLATE (ctx) : NULL_TREE);
    }

  if (inline_needs_template_parms (decl, nsdmi))
    {
      parms = DECL_TEMPLATE_PARMS (most_general_template (decl));
      levels = TMPL_PARMS_DEPTH (parms) - processing_template_decl;

      if (DECL_TEMPLATE_SPECIALIZATION (decl))
	{
	  /* A specialization contributes one level fewer.  */
	  --levels;
	  parms = TREE_CHAIN (parms);
	}

      push_inline_template_parms_recursive (parms, levels);
    }

  /* Remember how many levels of template parameters we pushed so that
     we can pop them later.  */
  inline_parm_levels.safe_push (levels);
}

/* Undo the effects of maybe_begin_member_template_processing.  */

void
maybe_end_member_template_processing (void)
{
  int i;
  int last;

  if (inline_parm_levels.length () == 0)
    return;

  last = inline_parm_levels.pop ();
  for (i = 0; i < last; ++i)
    {
      --processing_template_decl;
      current_template_parms = TREE_CHAIN (current_template_parms);
      poplevel (0, 0, 0);
    }
}

/* Return a new template argument vector which contains all of ARGS,
   but has as its innermost set of arguments the EXTRA_ARGS.  */

static tree
add_to_template_args (tree args, tree extra_args)
{
  tree new_args;
  int extra_depth;
  int i;
  int j;

  if (args == NULL_TREE || extra_args == error_mark_node)
    return extra_args;

  extra_depth = TMPL_ARGS_DEPTH (extra_args);
  new_args = make_tree_vec (TMPL_ARGS_DEPTH (args) + extra_depth);

  /* Copy the existing levels, then append the extra ones after them.  */
  for (i = 1; i <= TMPL_ARGS_DEPTH (args); ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i));

  for (j = 1; j <= extra_depth; ++j, ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (extra_args, j));

  return new_args;
}

/* Like add_to_template_args, but only the outermost ARGS are added to
   the EXTRA_ARGS.  In particular, all but TMPL_ARGS_DEPTH (EXTRA_ARGS)
   levels are added.  This function is used to combine the template
   arguments from a partial instantiation with the template arguments
   used to attain the full instantiation from the partial
   instantiation.  */

static tree
add_outermost_template_args (tree args, tree extra_args)
{
  tree new_args;

  /* If there are more levels of EXTRA_ARGS than there are ARGS,
     something very fishy is going on.  */
  gcc_assert (TMPL_ARGS_DEPTH (args) >= TMPL_ARGS_DEPTH (extra_args));

  /* If *all* the new arguments will be the EXTRA_ARGS, just return
     them.  */
  if (TMPL_ARGS_DEPTH (args) == TMPL_ARGS_DEPTH (extra_args))
    return extra_args;

  /* For the moment, we make ARGS look like it contains fewer levels.
     Note this temporarily mutates ARGS in place; it is restored
     below.  */
  TREE_VEC_LENGTH (args) -= TMPL_ARGS_DEPTH (extra_args);

  new_args = add_to_template_args (args, extra_args);

  /* Now, we restore ARGS to its full dimensions.  */
  TREE_VEC_LENGTH (args) += TMPL_ARGS_DEPTH (extra_args);

  return new_args;
}

/* Return the N levels of innermost template arguments from the ARGS.  */

tree
get_innermost_template_args (tree args, int n)
{
  tree new_args;
  int extra_levels;
  int i;

  gcc_assert (n >= 0);

  /* If N is 1, just return the innermost set of template arguments.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, TMPL_ARGS_DEPTH (args));

  /* If we're not removing anything, just return the arguments we
     were given.  */
  extra_levels = TMPL_ARGS_DEPTH (args) - n;
  gcc_assert (extra_levels >= 0);
  if (extra_levels == 0)
    return args;

  /* Make a new set of arguments, not containing the outer arguments.  */
  new_args = make_tree_vec (n);
  for (i = 1; i <= n; ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i,
			 TMPL_ARGS_LEVEL (args, i + extra_levels));

  return new_args;
}

/* The inverse of get_innermost_template_args: Return all but the innermost
   EXTRA_LEVELS levels of template arguments from the ARGS.  */

static tree
strip_innermost_template_args (tree args, int extra_levels)
{
  tree new_args;
  int n = TMPL_ARGS_DEPTH (args) - extra_levels;
  int i;

  gcc_assert (n >= 0);

  /* If N is 1, just return the outermost set of template arguments.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, 1);

  /* If we're not removing anything, just return the arguments we
     were given.  */
  gcc_assert (extra_levels >= 0);
  if (extra_levels == 0)
    return args;

  /* Make a new set of arguments, not containing the inner arguments.  */
  new_args = make_tree_vec (n);
  for (i = 1; i <= n; ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i));

  return new_args;
}

/* We've got a template header coming up; push to a new level for storing
   the parms.
*/ void begin_template_parm_list (void) { /* We use a non-tag-transparent scope here, which causes pushtag to put tags in this scope, rather than in the enclosing class or namespace scope. This is the right thing, since we want TEMPLATE_DECLS, and not TYPE_DECLS for template classes. For a global template class, push_template_decl handles putting the TEMPLATE_DECL into top-level scope. For a nested template class, e.g.: template <class T> struct S1 { template <class T> struct S2 {}; }; pushtag contains special code to insert the TEMPLATE_DECL for S2 at the right scope. */ begin_scope (sk_template_parms, NULL); ++processing_template_decl; ++processing_template_parmlist; note_template_header (0); /* Add a dummy parameter level while we process the parameter list. */ current_template_parms = tree_cons (size_int (processing_template_decl), make_tree_vec (0), current_template_parms); } /* This routine is called when a specialization is declared. If it is invalid to declare a specialization here, an error is reported and false is returned, otherwise this routine will return true. */ static bool check_specialization_scope (void) { tree scope = current_scope (); /* [temp.expl.spec] An explicit specialization shall be declared in the namespace of which the template is a member, or, for member templates, in the namespace of which the enclosing class or enclosing class template is a member. An explicit specialization of a member function, member class or static data member of a class template shall be declared in the namespace of which the class template is a member. 
*/ if (scope && TREE_CODE (scope) != NAMESPACE_DECL) { error ("explicit specialization in non-namespace scope %qD", scope); return false; } /* [temp.expl.spec] In an explicit specialization declaration for a member of a class template or a member template that appears in namespace scope, the member template and some of its enclosing class templates may remain unspecialized, except that the declaration shall not explicitly specialize a class member template if its enclosing class templates are not explicitly specialized as well. */ if (current_template_parms) { error ("enclosing class templates are not explicitly specialized"); return false; } return true; } /* We've just seen template <>. */ bool begin_specialization (void) { begin_scope (sk_template_spec, NULL); note_template_header (1); return check_specialization_scope (); } /* Called at then end of processing a declaration preceded by template<>. */ void end_specialization (void) { finish_scope (); reset_specialization (); } /* Any template <>'s that we have seen thus far are not referring to a function specialization. */ void reset_specialization (void) { processing_specialization = 0; template_header_count = 0; } /* We've just seen a template header. If SPECIALIZATION is nonzero, it was of the form template <>. */ static void note_template_header (int specialization) { processing_specialization = specialization; template_header_count++; } /* We're beginning an explicit instantiation. */ void begin_explicit_instantiation (void) { gcc_assert (!processing_explicit_instantiation); processing_explicit_instantiation = true; } void end_explicit_instantiation (void) { gcc_assert (processing_explicit_instantiation); processing_explicit_instantiation = false; } /* An explicit specialization or partial specialization of TMPL is being declared. Check that the namespace in which the specialization is occurring is permissible. Returns false iff it is invalid to specialize TMPL in the current namespace. 
*/ static bool check_specialization_namespace (tree tmpl) { tree tpl_ns = decl_namespace_context (tmpl); /* [tmpl.expl.spec] An explicit specialization shall be declared in a namespace enclosing the specialized template. An explicit specialization whose declarator-id is not qualified shall be declared in the nearest enclosing namespace of the template, or, if the namespace is inline (7.3.1), any namespace from its enclosing namespace set. */ if (current_scope() != DECL_CONTEXT (tmpl) && !at_namespace_scope_p ()) { error ("specialization of %qD must appear at namespace scope", tmpl); return false; } if (is_nested_namespace (current_namespace, tpl_ns, cxx_dialect < cxx11)) /* Same or enclosing namespace. */ return true; else { permerror (input_location, "specialization of %qD in different namespace", tmpl); inform (DECL_SOURCE_LOCATION (tmpl), " from definition of %q#D", tmpl); return false; } } /* SPEC is an explicit instantiation. Check that it is valid to perform this explicit instantiation in the current namespace. */ static void check_explicit_instantiation_namespace (tree spec) { tree ns; /* DR 275: An explicit instantiation shall appear in an enclosing namespace of its template. */ ns = decl_namespace_context (spec); if (!is_nested_namespace (current_namespace, ns)) permerror (input_location, "explicit instantiation of %qD in namespace %qD " "(which does not enclose namespace %qD)", spec, current_namespace, ns); } // Returns the type of a template specialization only if that // specialization needs to be defined. Otherwise (e.g., if the type has // already been defined), the function returns NULL_TREE. static tree maybe_new_partial_specialization (tree type) { // An implicit instantiation of an incomplete type implies // the definition of a new class template. // // template<typename T> // struct S; // // template<typename T> // struct S<T*>; // // Here, S<T*> is an implicit instantiation of S whose type // is incomplete. 
if (CLASSTYPE_IMPLICIT_INSTANTIATION (type) && !COMPLETE_TYPE_P (type)) return type; // It can also be the case that TYPE is a completed specialization. // Continuing the previous example, suppose we also declare: // // template<typename T> // requires Integral<T> // struct S<T*>; // // Here, S<T*> refers to the specialization S<T*> defined // above. However, we need to differentiate definitions because // we intend to define a new partial specialization. In this case, // we rely on the fact that the constraints are different for // this declaration than that above. // // Note that we also get here for injected class names and // late-parsed template definitions. We must ensure that we // do not create new type declarations for those cases. if (flag_concepts && CLASSTYPE_TEMPLATE_SPECIALIZATION (type)) { tree tmpl = CLASSTYPE_TI_TEMPLATE (type); tree args = CLASSTYPE_TI_ARGS (type); // If there are no template parameters, this cannot be a new // partial template specializtion? if (!current_template_parms) return NULL_TREE; // The injected-class-name is not a new partial specialization. if (DECL_SELF_REFERENCE_P (TYPE_NAME (type))) return NULL_TREE; // If the constraints are not the same as those of the primary // then, we can probably create a new specialization. tree type_constr = current_template_constraints (); if (type == TREE_TYPE (tmpl)) { tree main_constr = get_constraints (tmpl); if (equivalent_constraints (type_constr, main_constr)) return NULL_TREE; } // Also, if there's a pre-existing specialization with matching // constraints, then this also isn't new. 
tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl); while (specs) { tree spec_tmpl = TREE_VALUE (specs); tree spec_args = TREE_PURPOSE (specs); tree spec_constr = get_constraints (spec_tmpl); if (comp_template_args (args, spec_args) && equivalent_constraints (type_constr, spec_constr)) return NULL_TREE; specs = TREE_CHAIN (specs); } // Create a new type node (and corresponding type decl) // for the newly declared specialization. tree t = make_class_type (TREE_CODE (type)); CLASSTYPE_DECLARED_CLASS (t) = CLASSTYPE_DECLARED_CLASS (type); SET_TYPE_TEMPLATE_INFO (t, build_template_info (tmpl, args)); /* We only need a separate type node for storing the definition of this partial specialization; uses of S<T*> are unconstrained, so all are equivalent. So keep TYPE_CANONICAL the same. */ TYPE_CANONICAL (t) = TYPE_CANONICAL (type); // Build the corresponding type decl. tree d = create_implicit_typedef (DECL_NAME (tmpl), t); DECL_CONTEXT (d) = TYPE_CONTEXT (t); DECL_SOURCE_LOCATION (d) = input_location; return t; } return NULL_TREE; } /* The TYPE is being declared. If it is a template type, that means it is a partial specialization. Do appropriate error-checking. */ tree maybe_process_partial_specialization (tree type) { tree context; if (type == error_mark_node) return error_mark_node; /* A lambda that appears in specialization context is not itself a specialization. 
*/ if (CLASS_TYPE_P (type) && CLASSTYPE_LAMBDA_EXPR (type)) return type; if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) { error ("name of class shadows template template parameter %qD", TYPE_NAME (type)); return error_mark_node; } context = TYPE_CONTEXT (type); if (TYPE_ALIAS_P (type)) { tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (type); if (tinfo && DECL_ALIAS_TEMPLATE_P (TI_TEMPLATE (tinfo))) error ("specialization of alias template %qD", TI_TEMPLATE (tinfo)); else error ("explicit specialization of non-template %qT", type); return error_mark_node; } else if (CLASS_TYPE_P (type) && CLASSTYPE_USE_TEMPLATE (type)) { /* This is for ordinary explicit specialization and partial specialization of a template class such as: template <> class C<int>; or: template <class T> class C<T*>; Make sure that `C<int>' and `C<T*>' are implicit instantiations. */ if (tree t = maybe_new_partial_specialization (type)) { if (!check_specialization_namespace (CLASSTYPE_TI_TEMPLATE (t)) && !at_namespace_scope_p ()) return error_mark_node; SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (t); DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (t)) = input_location; if (processing_template_decl) { tree decl = push_template_decl (TYPE_MAIN_DECL (t)); if (decl == error_mark_node) return error_mark_node; return TREE_TYPE (decl); } } else if (CLASSTYPE_TEMPLATE_INSTANTIATION (type)) error ("specialization of %qT after instantiation", type); else if (errorcount && !processing_specialization && CLASSTYPE_TEMPLATE_SPECIALIZATION (type) && !uses_template_parms (CLASSTYPE_TI_ARGS (type))) /* Trying to define a specialization either without a template<> header or in an inappropriate place. We've already given an error, so just bail now so we don't actually define the specialization. 
*/ return error_mark_node; } else if (CLASS_TYPE_P (type) && !CLASSTYPE_USE_TEMPLATE (type) && CLASSTYPE_TEMPLATE_INFO (type) && context && CLASS_TYPE_P (context) && CLASSTYPE_TEMPLATE_INFO (context)) { /* This is for an explicit specialization of member class template according to [temp.expl.spec/18]: template <> template <class U> class C<int>::D; The context `C<int>' must be an implicit instantiation. Otherwise this is just a member class template declared earlier like: template <> class C<int> { template <class U> class D; }; template <> template <class U> class C<int>::D; In the first case, `C<int>::D' is a specialization of `C<T>::D' while in the second case, `C<int>::D' is a primary template and `C<T>::D' may not exist. */ if (CLASSTYPE_IMPLICIT_INSTANTIATION (context) && !COMPLETE_TYPE_P (type)) { tree t; tree tmpl = CLASSTYPE_TI_TEMPLATE (type); if (current_namespace != decl_namespace_context (tmpl)) { permerror (input_location, "specializing %q#T in different namespace", type); permerror (DECL_SOURCE_LOCATION (tmpl), " from definition of %q#D", tmpl); } /* Check for invalid specialization after instantiation: template <> template <> class C<int>::D<int>; template <> template <class U> class C<int>::D; */ for (t = DECL_TEMPLATE_INSTANTIATIONS (tmpl); t; t = TREE_CHAIN (t)) { tree inst = TREE_VALUE (t); if (CLASSTYPE_TEMPLATE_SPECIALIZATION (inst) || !COMPLETE_OR_OPEN_TYPE_P (inst)) { /* We already have a full specialization of this partial instantiation, or a full specialization has been looked up but not instantiated. Reassign it to the new member specialization template. 
*/ spec_entry elt; spec_entry *entry; elt.tmpl = most_general_template (tmpl); elt.args = CLASSTYPE_TI_ARGS (inst); elt.spec = inst; type_specializations->remove_elt (&elt); elt.tmpl = tmpl; elt.args = INNERMOST_TEMPLATE_ARGS (elt.args); spec_entry **slot = type_specializations->find_slot (&elt, INSERT); entry = ggc_alloc<spec_entry> (); *entry = elt; *slot = entry; } else /* But if we've had an implicit instantiation, that's a problem ([temp.expl.spec]/6). */ error ("specialization %qT after instantiation %qT", type, inst); } /* Mark TYPE as a specialization. And as a result, we only have one level of template argument for the innermost class template. */ SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (type); DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)) = input_location; CLASSTYPE_TI_ARGS (type) = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type)); } } else if (processing_specialization) { /* Someday C++0x may allow for enum template specialization. */ if (cxx_dialect > cxx98 && TREE_CODE (type) == ENUMERAL_TYPE && CLASS_TYPE_P (context) && CLASSTYPE_USE_TEMPLATE (context)) pedwarn (input_location, OPT_Wpedantic, "template specialization " "of %qD not allowed by ISO C++", type); else { error ("explicit specialization of non-template %qT", type); return error_mark_node; } } return type; } /* Returns nonzero if we can optimize the retrieval of specializations for TMPL, a TEMPLATE_DECL. In particular, for such a template, we do not use DECL_TEMPLATE_SPECIALIZATIONS at all. */ static inline bool optimize_specialization_lookup_p (tree tmpl) { return (DECL_FUNCTION_TEMPLATE_P (tmpl) && DECL_CLASS_SCOPE_P (tmpl) /* DECL_CLASS_SCOPE_P holds of T::f even if T is a template parameter. */ && CLASS_TYPE_P (DECL_CONTEXT (tmpl)) /* The optimized lookup depends on the fact that the template arguments for the member function template apply purely to the containing class, which is not true if the containing class is an explicit or partial specialization. 
*/ && !CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (tmpl)) && !DECL_MEMBER_TEMPLATE_P (tmpl) && !DECL_CONV_FN_P (tmpl) /* It is possible to have a template that is not a member template and is not a member of a template class: template <typename T> struct S { friend A::f(); }; Here, the friend function is a template, but the context does not have template information. The optimized lookup relies on having ARGS be the template arguments for both the class and the function template. */ && !DECL_FRIEND_P (DECL_TEMPLATE_RESULT (tmpl))); } /* Make sure ARGS doesn't use any inappropriate typedefs; we should have gone through coerce_template_parms by now. */ static void verify_unstripped_args_1 (tree inner) { for (int i = 0; i < TREE_VEC_LENGTH (inner); ++i) { tree arg = TREE_VEC_ELT (inner, i); if (TREE_CODE (arg) == TEMPLATE_DECL) /* OK */; else if (TYPE_P (arg)) gcc_assert (strip_typedefs (arg, NULL) == arg); else if (ARGUMENT_PACK_P (arg)) verify_unstripped_args_1 (ARGUMENT_PACK_ARGS (arg)); else if (strip_typedefs (TREE_TYPE (arg), NULL) != TREE_TYPE (arg)) /* Allow typedefs on the type of a non-type argument, since a parameter can have them. */; else gcc_assert (strip_typedefs_expr (arg, NULL) == arg); } } static void verify_unstripped_args (tree args) { ++processing_template_decl; if (!any_dependent_template_arguments_p (args)) verify_unstripped_args_1 (INNERMOST_TEMPLATE_ARGS (args)); --processing_template_decl; } /* Retrieve the specialization (in the sense of [temp.spec] - a specialization is either an instantiation or an explicit specialization) of TMPL for the given template ARGS. If there is no such specialization, return NULL_TREE. The ARGS are a vector of arguments, or a vector of vectors of arguments, in the case of templates with more than one level of parameters. If TMPL is a type template and CLASS_SPECIALIZATIONS_P is true, then we search for a partial specialization matching ARGS. This parameter is ignored if TMPL is not a class template. 
We can also look up a FIELD_DECL, if it is a lambda capture pack; the result is a NONTYPE_ARGUMENT_PACK. */ static tree retrieve_specialization (tree tmpl, tree args, hashval_t hash) { if (tmpl == NULL_TREE) return NULL_TREE; if (args == error_mark_node) return NULL_TREE; gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL || TREE_CODE (tmpl) == FIELD_DECL); /* There should be as many levels of arguments as there are levels of parameters. */ gcc_assert (TMPL_ARGS_DEPTH (args) == (TREE_CODE (tmpl) == TEMPLATE_DECL ? TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl)) : template_class_depth (DECL_CONTEXT (tmpl)))); if (flag_checking) verify_unstripped_args (args); /* Lambda functions in templates aren't instantiated normally, but through tsubst_lambda_expr. */ if (lambda_fn_in_template_p (tmpl)) return NULL_TREE; if (optimize_specialization_lookup_p (tmpl)) { /* The template arguments actually apply to the containing class. Find the class specialization with those arguments. */ tree class_template = CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (tmpl)); tree class_specialization = retrieve_specialization (class_template, args, 0); if (!class_specialization) return NULL_TREE; /* Find the instance of TMPL. */ tree fns = get_class_binding (class_specialization, DECL_NAME (tmpl)); for (ovl_iterator iter (fns); iter; ++iter) { tree fn = *iter; if (DECL_TEMPLATE_INFO (fn) && DECL_TI_TEMPLATE (fn) == tmpl /* using-declarations can add base methods to the method vec, and we don't want those here. 
	     */
	      && DECL_CONTEXT (fn) == class_specialization)
	    return fn;
	}
      return NULL_TREE;
    }
  else
    {
      spec_entry *found;
      spec_entry elt;
      hash_table<spec_hasher> *specializations;

      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = NULL_TREE;

      /* Class templates and everything else live in separate tables.  */
      if (DECL_CLASS_TEMPLATE_P (tmpl))
	specializations = type_specializations;
      else
	specializations = decl_specializations;

      /* HASH == 0 means the caller didn't precompute it.  */
      if (hash == 0)
	hash = spec_hasher::hash (&elt);

      found = specializations->find_with_hash (&elt, hash);
      if (found)
	return found->spec;
    }

  return NULL_TREE;
}

/* Like retrieve_specialization, but for local declarations.  */

tree
retrieve_local_specialization (tree tmpl)
{
  if (local_specializations == NULL)
    return NULL_TREE;

  tree *slot = local_specializations->get (tmpl);
  return slot ? *slot : NULL_TREE;
}

/* Returns nonzero iff DECL is a specialization of TMPL.  For a
   FUNCTION_DECL this walks the DECL_TI_TEMPLATE chain looking for TMPL
   itself; for a TYPE_DECL it walks the class's template chain comparing
   types.  */

int
is_specialization_of (tree decl, tree tmpl)
{
  tree t;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      for (t = decl;
	   t != NULL_TREE;
	   t = DECL_TEMPLATE_INFO (t) ? DECL_TI_TEMPLATE (t) : NULL_TREE)
	if (t == tmpl)
	  return 1;
    }
  else
    {
      gcc_assert (TREE_CODE (decl) == TYPE_DECL);

      for (t = TREE_TYPE (decl);
	   t != NULL_TREE;
	   t = CLASSTYPE_USE_TEMPLATE (t)
	     ? TREE_TYPE (CLASSTYPE_TI_TEMPLATE (t)) : NULL_TREE)
	if (same_type_ignoring_top_level_qualifiers_p (t, TREE_TYPE (tmpl)))
	  return 1;
    }

  return 0;
}

/* Returns nonzero iff DECL is a specialization of friend declaration
   FRIEND_DECL according to [temp.friend].  */

bool
is_specialization_of_friend (tree decl, tree friend_decl)
{
  bool need_template = true;
  int template_depth;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
	      || TREE_CODE (decl) == TYPE_DECL);

  /* For [temp.friend/6] when FRIEND_DECL is an ordinary member function
     of a template class, we want to check if DECL is a specialization
     of this.  */
  if (TREE_CODE (friend_decl) == FUNCTION_DECL
      && DECL_TEMPLATE_INFO (friend_decl)
      && !DECL_USE_TEMPLATE (friend_decl))
    {
      /* We want a TEMPLATE_DECL for `is_specialization_of'.
	 */
      friend_decl = DECL_TI_TEMPLATE (friend_decl);
      need_template = false;
    }
  else if (TREE_CODE (friend_decl) == TEMPLATE_DECL
	   && !PRIMARY_TEMPLATE_P (friend_decl))
    need_template = false;

  /* There is nothing to do if this is not a template friend.  */
  if (TREE_CODE (friend_decl) != TEMPLATE_DECL)
    return false;

  if (is_specialization_of (decl, friend_decl))
    return true;

  /* [temp.friend/6]

     A member of a class template may be declared to be a friend of a
     non-template class.  In this case, the corresponding member of
     every specialization of the class template is a friend of the
     class granting friendship.

     For example, given a template friend declaration

       template <class T> friend void A<T>::f();

     the member function below is considered a friend

       template <> struct A<int> {
	 void f();
       };

     For this type of template friend, TEMPLATE_DEPTH below will be
     nonzero.  To determine if DECL is a friend of FRIEND, we first
     check if the enclosing class is a specialization of another.  */

  template_depth = template_class_depth (CP_DECL_CONTEXT (friend_decl));
  if (template_depth
      && DECL_CLASS_SCOPE_P (decl)
      && is_specialization_of (TYPE_NAME (DECL_CONTEXT (decl)),
			       CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (friend_decl))))
    {
      /* Next, we check the members themselves.  In order to handle
	 a few tricky cases, such as when FRIEND_DECL's are

	   template <class T> friend void A<T>::g(T t);
	   template <class T> template <T t> friend void A<T>::h();

	 and DECL's are

	   void A<int>::g(int);
	   template <int> void A<int>::h();

	 we need to figure out ARGS, the template arguments from
	 the context of DECL.  This is required for template substitution
	 of `T' in the function parameter of `g' and template parameter
	 of `h' in the above examples.  Here ARGS corresponds to `int'.
	 */

      tree context = DECL_CONTEXT (decl);
      tree args = NULL_TREE;
      int current_depth = 0;

      /* Collect one level of template arguments for each enclosing
	 templated class, innermost first.  */
      while (current_depth < template_depth)
	{
	  if (CLASSTYPE_TEMPLATE_INFO (context))
	    {
	      if (current_depth == 0)
		args = TYPE_TI_ARGS (context);
	      else
		args = add_to_template_args (TYPE_TI_ARGS (context), args);
	      current_depth++;
	    }
	  context = TYPE_CONTEXT (context);
	}

      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  bool is_template;
	  tree friend_type;
	  tree decl_type;
	  tree friend_args_type;
	  tree decl_args_type;

	  /* Make sure that both DECL and FRIEND_DECL are templates or
	     non-templates.  */
	  is_template = DECL_TEMPLATE_INFO (decl)
			&& PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl));
	  if (need_template ^ is_template)
	    return false;
	  else if (is_template)
	    {
	      /* If both are templates, check template parameter list.  */
	      tree friend_parms
		= tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
					 args, tf_none);
	      if (!comp_template_parms
		     (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (decl)),
		      friend_parms))
		return false;

	      decl_type = TREE_TYPE (DECL_TI_TEMPLATE (decl));
	    }
	  else
	    decl_type = TREE_TYPE (decl);

	  /* Substitute ARGS into the friend's type; failure (e.g. SFINAE
	     under tf_none) means no match.  */
	  friend_type = tsubst_function_type (TREE_TYPE (friend_decl), args,
					      tf_none, NULL_TREE);
	  if (friend_type == error_mark_node)
	    return false;

	  /* Check if return types match.  */
	  if (!same_type_p (TREE_TYPE (decl_type), TREE_TYPE (friend_type)))
	    return false;

	  /* Check if function parameter types match, ignoring the
	     `this' parameter.  */
	  friend_args_type = TYPE_ARG_TYPES (friend_type);
	  decl_args_type = TYPE_ARG_TYPES (decl_type);
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (friend_decl))
	    friend_args_type = TREE_CHAIN (friend_args_type);
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    decl_args_type = TREE_CHAIN (decl_args_type);

	  return compparms (decl_args_type, friend_args_type);
	}
      else
	{
	  /* DECL is a TYPE_DECL */
	  bool is_template;
	  tree decl_type = TREE_TYPE (decl);

	  /* Make sure that both DECL and FRIEND_DECL are templates or
	     non-templates.
	     */
	  is_template
	    = CLASSTYPE_TEMPLATE_INFO (decl_type)
	      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (decl_type));

	  if (need_template ^ is_template)
	    return false;
	  else if (is_template)
	    {
	      tree friend_parms;
	      /* If both are templates, check the name of the two
		 TEMPLATE_DECL's first because is_friend didn't.  */
	      if (DECL_NAME (CLASSTYPE_TI_TEMPLATE (decl_type))
		  != DECL_NAME (friend_decl))
		return false;

	      /* Now check template parameter list.  */
	      friend_parms
		= tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
					 args, tf_none);
	      return comp_template_parms
		(DECL_TEMPLATE_PARMS (CLASSTYPE_TI_TEMPLATE (decl_type)),
		 friend_parms);
	    }
	  else
	    return (DECL_NAME (decl) == DECL_NAME (friend_decl));
	}
    }
  return false;
}

/* Register the specialization SPEC as a specialization of TMPL with
   the indicated ARGS.  IS_FRIEND indicates whether the specialization
   is actually just a friend declaration.  (NOTE(review): the comment
   also mentions an ATTRLIST parameter that this signature does not
   have — looks like a stale reference; confirm against callers.)
   Returns SPEC, or an equivalent prior declaration, if available.

   We also store instantiations of field packs in the hash table, even
   though they are not themselves templates, to make lookup easier.  */

static tree
register_specialization (tree spec, tree tmpl, tree args, bool is_friend,
			 hashval_t hash)
{
  tree fn;
  spec_entry **slot = NULL;
  spec_entry elt;

  gcc_assert ((TREE_CODE (tmpl) == TEMPLATE_DECL && DECL_P (spec))
	      || (TREE_CODE (tmpl) == FIELD_DECL
		  && TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK));

  if (TREE_CODE (spec) == FUNCTION_DECL
      && uses_template_parms (DECL_TI_ARGS (spec)))
    /* This is the FUNCTION_DECL for a partial instantiation.  Don't
       register it; we want the corresponding TEMPLATE_DECL instead.
       We use `uses_template_parms (DECL_TI_ARGS (spec))' rather than
       the more obvious `uses_template_parms (spec)' to avoid problems
       with default function arguments.
       In particular, given something like this:

	  template <class T> void f(T t1, T t = T())

       the default argument expression is not substituted for in an
       instantiation unless and until it is actually needed.  */
    return spec;

  if (optimize_specialization_lookup_p (tmpl))
    /* We don't put these specializations in the hash table, but we might
       want to give an error about a mismatch.  */
    fn = retrieve_specialization (tmpl, args, 0);
  else
    {
      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = spec;

      if (hash == 0)
	hash = spec_hasher::hash (&elt);

      /* INSERT reserves a slot even on a miss; it is filled in at the
	 bottom of this function if no equivalent prior decl wins.  */
      slot = decl_specializations->find_slot_with_hash (&elt, hash, INSERT);
      if (*slot)
	fn = ((spec_entry *) *slot)->spec;
      else
	fn = NULL_TREE;
    }

  /* We can sometimes try to re-register a specialization that we've
     already got.  In particular, regenerate_decl_from_template calls
     duplicate_decls which will update the specialization list.  But,
     we'll still get called again here anyhow.  It's more convenient
     to simply allow this than to try to prevent it.  */
  if (fn == spec)
    return spec;
  else if (fn && DECL_TEMPLATE_SPECIALIZATION (spec))
    {
      if (DECL_TEMPLATE_INSTANTIATION (fn))
	{
	  if (DECL_ODR_USED (fn)
	      || DECL_EXPLICIT_INSTANTIATION (fn))
	    {
	      error ("specialization of %qD after instantiation",
		     fn);
	      return error_mark_node;
	    }
	  else
	    {
	      tree clone;
	      /* This situation should occur only if the first
		 specialization is an implicit instantiation, the
		 second is an explicit specialization, and the
		 implicit instantiation has not yet been used.  That
		 situation can occur if we have implicitly
		 instantiated a member function and then specialized
		 it later.

		 We can also wind up here if a friend declaration that
		 looked like an instantiation turns out to be a
		 specialization:

		   template <class T> void foo(T);
		   class S { friend void foo<>(int) };
		   template <> void foo(int);

		 We transform the existing DECL in place so that any
		 pointers to it become pointers to the updated
		 declaration.

		 If there was a definition for the template, but not
		 for the specialization, we want this to look as if
		 there were no definition, and vice versa.  */
	      DECL_INITIAL (fn) = NULL_TREE;
	      duplicate_decls (spec, fn, is_friend);

	      /* The call to duplicate_decls will have applied
		 [temp.expl.spec]:

		   An explicit specialization of a function template
		   is inline only if it is explicitly declared to be,
		   and independently of whether its function template
		   is.

		 to the primary function; now copy the inline bits to
		 the various clones.  */
	      FOR_EACH_CLONE (clone, fn)
		{
		  DECL_DECLARED_INLINE_P (clone)
		    = DECL_DECLARED_INLINE_P (fn);
		  DECL_SOURCE_LOCATION (clone)
		    = DECL_SOURCE_LOCATION (fn);
		  DECL_DELETED_FN (clone)
		    = DECL_DELETED_FN (fn);
		}
	      check_specialization_namespace (tmpl);

	      return fn;
	    }
	}
      else if (DECL_TEMPLATE_SPECIALIZATION (fn))
	{
	  tree dd = duplicate_decls (spec, fn, is_friend);
	  if (dd == error_mark_node)
	    /* We've already complained in duplicate_decls.  */
	    return error_mark_node;

	  if (dd == NULL_TREE && DECL_INITIAL (spec))
	    /* Dup decl failed, but this is a new definition.  Set the
	       line number so any errors match this new
	       definition.  */
	    DECL_SOURCE_LOCATION (fn) = DECL_SOURCE_LOCATION (spec);

	  return fn;
	}
    }
  else if (fn)
    return duplicate_decls (spec, fn, is_friend);

  /* A specialization must be declared in the same namespace as the
     template it is specializing.
     */
  if (DECL_P (spec) && DECL_TEMPLATE_SPECIALIZATION (spec)
      && !check_specialization_namespace (tmpl))
    DECL_CONTEXT (spec) = DECL_CONTEXT (tmpl);

  if (slot != NULL /* !optimize_specialization_lookup_p (tmpl) */)
    {
      /* Fill the slot reserved earlier; GC-allocated since tables are
	 GTY-rooted.  */
      spec_entry *entry = ggc_alloc<spec_entry> ();
      gcc_assert (tmpl && args && spec);
      *entry = elt;
      *slot = entry;
      if ((TREE_CODE (spec) == FUNCTION_DECL && DECL_NAMESPACE_SCOPE_P (spec)
	   && PRIMARY_TEMPLATE_P (tmpl)
	   && DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (tmpl)) == NULL_TREE)
	  || variable_template_p (tmpl))
	/* If TMPL is a forward declaration of a template function, keep a list
	   of all specializations in case we need to reassign them to a friend
	   template later in tsubst_friend_function.

	   Also keep a list of all variable template instantiations so that
	   process_partial_specialization can check whether a later partial
	   specialization would have used it.  */
	DECL_TEMPLATE_INSTANTIATIONS (tmpl)
	  = tree_cons (args, spec,
		       DECL_TEMPLATE_INSTANTIATIONS (tmpl));
    }

  return spec;
}

/* Returns true iff two spec_entry nodes are equivalent.  */

/* Nonzero while comparing entries; other code can test this to know a
   specialization-table comparison is in progress.  */
int comparing_specializations;

bool
spec_hasher::equal (spec_entry *e1, spec_entry *e2)
{
  int equal;

  ++comparing_specializations;
  equal = (e1->tmpl == e2->tmpl
	   && comp_template_args (e1->args, e2->args));
  if (equal && flag_concepts
      /* tmpl could be a FIELD_DECL for a capture pack.  */
      && TREE_CODE (e1->tmpl) == TEMPLATE_DECL
      && VAR_P (DECL_TEMPLATE_RESULT (e1->tmpl))
      && uses_template_parms (e1->args))
    {
      /* Partial specializations of a variable template can be
	 distinguished by constraints.  */
      tree c1 = e1->spec ? get_constraints (e1->spec) : NULL_TREE;
      tree c2 = e2->spec ? get_constraints (e2->spec) : NULL_TREE;
      equal = equivalent_constraints (c1, c2);
    }
  --comparing_specializations;

  return equal;
}

/* Returns a hash for a template TMPL and template arguments ARGS.
   */

static hashval_t
hash_tmpl_and_args (tree tmpl, tree args)
{
  /* Seed with the template's DECL_UID so distinct templates with equal
     argument vectors hash apart.  */
  hashval_t val = iterative_hash_object (DECL_UID (tmpl), 0);
  return iterative_hash_template_arg (args, val);
}

/* Returns a hash for a spec_entry node based on the TMPL and ARGS
   members, ignoring SPEC.  */

hashval_t
spec_hasher::hash (spec_entry *e)
{
  return hash_tmpl_and_args (e->tmpl, e->args);
}

/* Recursively calculate a hash value for a template argument ARG, for use
   in the hash tables of template specializations.  Must hash equal for
   any two arguments that spec_hasher::equal considers equal.  */

hashval_t
iterative_hash_template_arg (tree arg, hashval_t val)
{
  unsigned HOST_WIDE_INT i;
  enum tree_code code;
  char tclass;

  if (arg == NULL_TREE)
    return iterative_hash_object (arg, val);

  if (!TYPE_P (arg))
    STRIP_NOPS (arg);

  if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
    gcc_unreachable ();

  code = TREE_CODE (arg);
  tclass = TREE_CODE_CLASS (code);

  val = iterative_hash_object (code, val);

  /* First switch: codes with bespoke hashing.  Cases that `break'
     fall through to the generic class-based switch below.  */
  switch (code)
    {
    case ERROR_MARK:
      return val;

    case IDENTIFIER_NODE:
      return iterative_hash_object (IDENTIFIER_HASH_VALUE (arg), val);

    case TREE_VEC:
      {
	int i, len = TREE_VEC_LENGTH (arg);
	for (i = 0; i < len; ++i)
	  val = iterative_hash_template_arg (TREE_VEC_ELT (arg, i), val);
	return val;
      }

    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      val = iterative_hash_template_arg (PACK_EXPANSION_PATTERN (arg), val);
      return iterative_hash_template_arg (PACK_EXPANSION_EXTRA_ARGS (arg), val);

    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      return iterative_hash_template_arg (ARGUMENT_PACK_ARGS (arg), val);

    case TREE_LIST:
      for (; arg; arg = TREE_CHAIN (arg))
	val = iterative_hash_template_arg (TREE_VALUE (arg), val);
      return val;

    case OVERLOAD:
      for (lkp_iterator iter (arg); iter; ++iter)
	val = iterative_hash_template_arg (*iter, val);
      return val;

    case CONSTRUCTOR:
      {
	tree field, value;
	/* NOTE(review): the result of this call is discarded, so the
	   constructor's type never feeds VAL.  Harmless for correctness
	   (only hash quality), but it looks unintended — confirm.  */
	iterative_hash_template_arg (TREE_TYPE (arg), val);
	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (arg), i, field, value)
	  {
	    val = iterative_hash_template_arg (field, val);
	    val = iterative_hash_template_arg (value, val);
	  }
	return val;
      }
case PARM_DECL: if (!DECL_ARTIFICIAL (arg)) { val = iterative_hash_object (DECL_PARM_INDEX (arg), val); val = iterative_hash_object (DECL_PARM_LEVEL (arg), val); } return iterative_hash_template_arg (TREE_TYPE (arg), val); case TARGET_EXPR: return iterative_hash_template_arg (TARGET_EXPR_INITIAL (arg), val); case PTRMEM_CST: val = iterative_hash_template_arg (PTRMEM_CST_CLASS (arg), val); return iterative_hash_template_arg (PTRMEM_CST_MEMBER (arg), val); case TEMPLATE_PARM_INDEX: val = iterative_hash_template_arg (TREE_TYPE (TEMPLATE_PARM_DECL (arg)), val); val = iterative_hash_object (TEMPLATE_PARM_LEVEL (arg), val); return iterative_hash_object (TEMPLATE_PARM_IDX (arg), val); case TRAIT_EXPR: val = iterative_hash_object (TRAIT_EXPR_KIND (arg), val); val = iterative_hash_template_arg (TRAIT_EXPR_TYPE1 (arg), val); return iterative_hash_template_arg (TRAIT_EXPR_TYPE2 (arg), val); case BASELINK: val = iterative_hash_template_arg (BINFO_TYPE (BASELINK_BINFO (arg)), val); return iterative_hash_template_arg (DECL_NAME (get_first_fn (arg)), val); case MODOP_EXPR: val = iterative_hash_template_arg (TREE_OPERAND (arg, 0), val); code = TREE_CODE (TREE_OPERAND (arg, 1)); val = iterative_hash_object (code, val); return iterative_hash_template_arg (TREE_OPERAND (arg, 2), val); case LAMBDA_EXPR: /* A lambda can't appear in a template arg, but don't crash on erroneous input. */ gcc_assert (seen_error ()); return val; case CAST_EXPR: case IMPLICIT_CONV_EXPR: case STATIC_CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case DYNAMIC_CAST_EXPR: case NEW_EXPR: val = iterative_hash_template_arg (TREE_TYPE (arg), val); /* Now hash operands as usual. */ break; default: break; } switch (tclass) { case tcc_type: if (alias_template_specialization_p (arg)) { // We want an alias specialization that survived strip_typedefs // to hash differently from its TYPE_CANONICAL, to avoid hash // collisions that compare as different in template_args_equal. 
// These could be dependent specializations that strip_typedefs // left alone, or untouched specializations because // coerce_template_parms returns the unconverted template // arguments if it sees incomplete argument packs. tree ti = TYPE_ALIAS_TEMPLATE_INFO (arg); return hash_tmpl_and_args (TI_TEMPLATE (ti), TI_ARGS (ti)); } if (TYPE_CANONICAL (arg)) return iterative_hash_object (TYPE_HASH (TYPE_CANONICAL (arg)), val); else if (TREE_CODE (arg) == DECLTYPE_TYPE) return iterative_hash_template_arg (DECLTYPE_TYPE_EXPR (arg), val); /* Otherwise just compare the types during lookup. */ return val; case tcc_declaration: case tcc_constant: return iterative_hash_expr (arg, val); default: gcc_assert (IS_EXPR_CODE_CLASS (tclass)); { unsigned n = cp_tree_operand_length (arg); for (i = 0; i < n; ++i) val = iterative_hash_template_arg (TREE_OPERAND (arg, i), val); return val; } } gcc_unreachable (); return 0; } /* Unregister the specialization SPEC as a specialization of TMPL. Replace it with NEW_SPEC, if NEW_SPEC is non-NULL. Returns true if the SPEC was listed as a specialization of TMPL. Note that SPEC has been ggc_freed, so we can't look inside it. */ bool reregister_specialization (tree spec, tree tinfo, tree new_spec) { spec_entry *entry; spec_entry elt; elt.tmpl = most_general_template (TI_TEMPLATE (tinfo)); elt.args = TI_ARGS (tinfo); elt.spec = NULL_TREE; entry = decl_specializations->find (&elt); if (entry != NULL) { gcc_assert (entry->spec == spec || entry->spec == new_spec); gcc_assert (new_spec != NULL_TREE); entry->spec = new_spec; return 1; } return 0; } /* Like register_specialization, but for local declarations. We are registering SPEC, an instantiation of TMPL. */ void register_local_specialization (tree spec, tree tmpl) { gcc_assert (tmpl != spec); local_specializations->put (tmpl, spec); } /* TYPE is a class type. Returns true if TYPE is an explicitly specialized class. 
   */

bool
explicit_class_specialization_p (tree type)
{
  if (!CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
    return false;
  /* A full explicit specialization has no remaining template
     parameters in its arguments.  */
  return !uses_template_parms (CLASSTYPE_TI_ARGS (type));
}

/* Print the list of functions at FNS, going through all the overloads
   for each element of the list.  Alternatively, FNS can not be a
   TREE_LIST, in which case it will be printed together with all the
   overloads.

   MORE and *STR should respectively be FALSE and NULL when the function
   is called from the outside.  They are used internally on recursive
   calls.  print_candidates manages the two parameters and leaves NULL
   in *STR when it ends.  */

static void
print_candidates_1 (tree fns, char **str, bool more = false)
{
  if (TREE_CODE (fns) == TREE_LIST)
    for (; fns; fns = TREE_CHAIN (fns))
      print_candidates_1 (TREE_VALUE (fns), str, more || TREE_CHAIN (fns));
  else
    for (lkp_iterator iter (fns); iter;)
      {
	tree cand = *iter;
	++iter;

	/* On the first candidate printed, pick the header and cache a
	   blank prefix of the same width for the rest.  */
	const char *pfx = *str;
	if (!pfx)
	  {
	    if (more || iter)
	      pfx = _("candidates are:");
	    else
	      pfx = _("candidate is:");
	    *str = get_spaces (pfx);
	  }
	inform (DECL_SOURCE_LOCATION (cand), "%s %#qD", pfx, cand);
      }
}

/* Print the list of candidate FNS in an error message.  FNS can also
   be a TREE_LIST of non-functions in the case of an ambiguous lookup.  */

void
print_candidates (tree fns)
{
  char *str = NULL;
  print_candidates_1 (fns, &str);
  free (str);
}

/* Get a (possibly) constrained template declaration for the purpose of
   ordering candidates.  */

static tree
get_template_for_ordering (tree list)
{
  gcc_assert (TREE_CODE (list) == TREE_LIST);
  tree f = TREE_VALUE (list);
  if (tree ti = DECL_TEMPLATE_INFO (f))
    return TI_TEMPLATE (ti);
  return f;
}

/* Among candidates having the same signature, return the most
   constrained or NULL_TREE if there is no best candidate.  If the
   signatures of candidates vary (e.g., template specialization vs.
   member function), then there can be no most constrained.

   Note that we don't compare constraints on the functions
   themselves, but rather those of their templates.
*/ static tree most_constrained_function (tree candidates) { // Try to find the best candidate in a first pass. tree champ = candidates; for (tree c = TREE_CHAIN (champ); c; c = TREE_CHAIN (c)) { int winner = more_constrained (get_template_for_ordering (champ), get_template_for_ordering (c)); if (winner == -1) champ = c; // The candidate is more constrained else if (winner == 0) return NULL_TREE; // Neither is more constrained } // Verify that the champ is better than previous candidates. for (tree c = candidates; c != champ; c = TREE_CHAIN (c)) { if (!more_constrained (get_template_for_ordering (champ), get_template_for_ordering (c))) return NULL_TREE; } return champ; } /* Returns the template (one of the functions given by TEMPLATE_ID) which can be specialized to match the indicated DECL with the explicit template args given in TEMPLATE_ID. The DECL may be NULL_TREE if none is available. In that case, the functions in TEMPLATE_ID are non-members. If NEED_MEMBER_TEMPLATE is nonzero the function is known to be a specialization of a member template. The TEMPLATE_COUNT is the number of references to qualifying template classes that appeared in the name of the function. See check_explicit_specialization for a more accurate description. TSK indicates what kind of template declaration (if any) is being declared. TSK_TEMPLATE indicates that the declaration given by DECL, though a FUNCTION_DECL, has template parameters, and is therefore a template function. The template args (those explicitly specified and those deduced) are output in a newly created vector *TARGS_OUT. If it is impossible to determine the result, an error message is issued. The error_mark_node is returned to indicate failure. 
   */

static tree
determine_specialization (tree template_id,
			  tree decl,
			  tree* targs_out,
			  int need_member_template,
			  int template_count,
			  tmpl_spec_kind tsk)
{
  tree fns;
  tree targs;
  tree explicit_targs;
  tree candidates = NULL_TREE;

  /* A TREE_LIST of templates of which DECL may be a specialization.
     The TREE_VALUE of each node is a TEMPLATE_DECL.  The
     corresponding TREE_PURPOSE is the set of template arguments that,
     when used to instantiate the template, would produce a function
     with the signature of DECL.  */
  tree templates = NULL_TREE;
  int header_count;
  cp_binding_level *b;

  *targs_out = NULL_TREE;

  if (template_id == error_mark_node || decl == error_mark_node)
    return error_mark_node;

  /* We shouldn't be specializing a member template of an
     unspecialized class template; we already gave an error in
     check_specialization_scope, now avoid crashing.  */
  if (!VAR_P (decl)
      && template_count && DECL_CLASS_SCOPE_P (decl)
      && template_class_depth (DECL_CONTEXT (decl)) > 0)
    {
      gcc_assert (errorcount);
      return error_mark_node;
    }

  fns = TREE_OPERAND (template_id, 0);
  explicit_targs = TREE_OPERAND (template_id, 1);

  if (fns == error_mark_node)
    return error_mark_node;

  /* Check for baselinks.  */
  if (BASELINK_P (fns))
    fns = BASELINK_FUNCTIONS (fns);

  if (TREE_CODE (decl) == FUNCTION_DECL && !is_overloaded_fn (fns))
    {
      error ("%qD is not a function template", fns);
      return error_mark_node;
    }
  else if (VAR_P (decl) && !variable_template_p (fns))
    {
      error ("%qD is not a variable template", fns);
      return error_mark_node;
    }

  /* Count the number of template headers specified for this
     specialization.
     */
  header_count = 0;
  for (b = current_binding_level;
       b->kind == sk_template_parms;
       b = b->level_chain)
    ++header_count;

  /* Keep the unfiltered set for diagnostics below.  */
  tree orig_fns = fns;

  if (variable_template_p (fns))
    {
      tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (fns));
      targs = coerce_template_parms (parms, explicit_targs, fns,
				     tf_warning_or_error,
				     /*req_all*/true, /*use_defarg*/true);
      if (targs != error_mark_node)
	templates = tree_cons (targs, fns, templates);
    }
  else for (lkp_iterator iter (fns); iter; ++iter)
    {
      tree fn = *iter;

      if (TREE_CODE (fn) == TEMPLATE_DECL)
	{
	  tree decl_arg_types;
	  tree fn_arg_types;
	  tree insttype;

	  /* In case of explicit specialization, we need to check if
	     the number of template headers appearing in the specialization
	     is correct.  This is usually done in check_explicit_specialization,
	     but the check done there cannot be exhaustive when specializing
	     member functions.  Consider the following code:

	       template <> void A<int>::f(int);
	       template <> template <> void A<int>::f(int);

	     Assuming that A<int> is not itself an explicit specialization
	     already, the first line specializes "f" which is a non-template
	     member function, whilst the second line specializes "f" which
	     is a template member function.  So both lines are syntactically
	     correct, and check_explicit_specialization does not reject
	     them.

	     Here, we can do better, as we are matching the specialization
	     against the declarations.  We count the number of template
	     headers, and we check if they match TEMPLATE_COUNT + 1
	     (TEMPLATE_COUNT is the number of qualifying template classes,
	     plus there must be another header for the member template
	     itself).

	     Notice that if header_count is zero, this is not a
	     specialization but rather a template instantiation, so there
	     is no check we can perform here.  */
	  if (header_count && header_count != template_count + 1)
	    continue;

	  /* Check that the number of template arguments at the
	     innermost level for DECL is the same as for FN.
	     */
	  if (current_binding_level->kind == sk_template_parms
	      && !current_binding_level->explicit_spec_p
	      && (TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (fn))
		  != TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
				      (current_template_parms))))
	    continue;

	  /* DECL might be a specialization of FN.  */
	  decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));

	  /* For a non-static member function, we need to make sure
	     that the const qualification is the same.  Since
	     get_bindings does not try to merge the "this" parameter,
	     we must do the comparison explicitly.  */
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn))
	    {
	      if (!same_type_p (TREE_VALUE (fn_arg_types),
				TREE_VALUE (decl_arg_types)))
		continue;

	      /* And the ref-qualification.  */
	      if (type_memfn_rqual (TREE_TYPE (decl))
		  != type_memfn_rqual (TREE_TYPE (fn)))
		continue;
	    }

	  /* Skip the "this" parameter and, for constructors of
	     classes with virtual bases, the VTT parameter.  A
	     full specialization of a constructor will have a
	     VTT parameter, but a template never will.  */
	  decl_arg_types
	    = skip_artificial_parms_for (decl, decl_arg_types);
	  fn_arg_types
	    = skip_artificial_parms_for (fn, fn_arg_types);

	  /* Function templates cannot be specializations; there are
	     no partial specializations of functions.  Therefore, if
	     the type of DECL does not match FN, there is no
	     match.

	     Note that it should never be the case that we have both
	     candidates added here, and for regular member functions
	     below.  */
	  if (tsk == tsk_template)
	    {
	      if (compparms (fn_arg_types, decl_arg_types))
		candidates = tree_cons (NULL_TREE, fn, candidates);
	      continue;
	    }

	  /* See whether this function might be a specialization of this
	     template.  Suppress access control because we might be trying
	     to make this specialization a friend, and we have already done
	     access control for the declaration of the specialization.
	     */
	  push_deferring_access_checks (dk_no_check);
	  targs = get_bindings (fn, decl, explicit_targs, /*check_ret=*/true);
	  pop_deferring_access_checks ();

	  if (!targs)
	    /* We cannot deduce template arguments that when used to
	       specialize TMPL will produce DECL.  */
	    continue;

	  if (uses_template_parms (targs))
	    /* We deduced something involving 'auto', which isn't a valid
	       template argument.  */
	    continue;

	  /* Remove, from the set of candidates, all those functions
	     whose constraints are not satisfied.  */
	  if (flag_concepts && !constraints_satisfied_p (fn, targs))
	    continue;

	  // Then, try to form the new function type.
	  insttype = tsubst (TREE_TYPE (fn), targs, tf_fndecl_type, NULL_TREE);
	  if (insttype == error_mark_node)
	    continue;
	  fn_arg_types
	    = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (insttype));
	  if (!compparms (fn_arg_types, decl_arg_types))
	    continue;

	  /* Save this template, and the arguments deduced.  */
	  templates = tree_cons (targs, fn, templates);
	}
      else if (need_member_template)
	/* FN is an ordinary member function, and we need a
	   specialization of a member template.  */
	;
      else if (TREE_CODE (fn) != FUNCTION_DECL)
	/* We can get IDENTIFIER_NODEs here in certain erroneous
	   cases.  */
	;
      else if (!DECL_FUNCTION_MEMBER_P (fn))
	/* This is just an ordinary non-member function.  Nothing can
	   be a specialization of that.  */
	;
      else if (DECL_ARTIFICIAL (fn))
	/* Cannot specialize functions that are created implicitly.  */
	;
      else
	{
	  tree decl_arg_types;

	  /* This is an ordinary member function.  However, since
	     we're here, we can assume its enclosing class is a
	     template class.  For example,

	       template <typename T> struct S { void f(); };
	       template <> void S<int>::f() {}

	     Here, S<int>::f is a non-template, but S<int> is a
	     template class.  If FN has the same type as DECL, we
	     might be in business.  */

	  if (!DECL_TEMPLATE_INFO (fn))
	    /* Its enclosing class is an explicit specialization
	       of a template class.  This is not a candidate.
	       */
	    continue;

	  if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)),
			    TREE_TYPE (TREE_TYPE (fn))))
	    /* The return types differ.  */
	    continue;

	  /* Adjust the type of DECL in case FN is a static member.  */
	  decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  if (DECL_STATIC_FUNCTION_P (fn)
	      && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    decl_arg_types = TREE_CHAIN (decl_arg_types);

	  if (!compparms (TYPE_ARG_TYPES (TREE_TYPE (fn)),
			  decl_arg_types))
	    continue;

	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
	      && (type_memfn_rqual (TREE_TYPE (decl))
		  != type_memfn_rqual (TREE_TYPE (fn))))
	    continue;

	  // If the deduced arguments do not satisfy the constraints,
	  // this is not a candidate.
	  if (flag_concepts && !constraints_satisfied_p (fn))
	    continue;

	  // Add the candidate.
	  candidates = tree_cons (NULL_TREE, fn, candidates);
	}
    }

  if (templates && TREE_CHAIN (templates))
    {
      /* We have:

	   [temp.expl.spec]

	   It is possible for a specialization with a given function
	   signature to be instantiated from more than one function
	   template.  In such cases, explicit specification of the
	   template arguments must be used to uniquely identify the
	   function template specialization being specialized.

	 Note that here, there's no suggestion that we're supposed to
	 determine which of the candidate templates is most
	 specialized.  However, we, also have:

	   [temp.func.order]

	   Partial ordering of overloaded function template
	   declarations is used in the following contexts to select
	   the function template to which a function template
	   specialization refers:

	   -- when an explicit specialization refers to a function
	      template.

	 So, we do use the partial ordering rules, at least for now.
	 This extension can only serve to make invalid programs valid,
	 so it's safe.  And, there is strong anecdotal evidence that
	 the committee intended the partial ordering rules to apply;
	 the EDG front end has that behavior, and John Spicer claims
	 that the committee simply forgot to delete the wording in
	 [temp.expl.spec].
	 */
      tree tmpl = most_specialized_instantiation (templates);
      if (tmpl != error_mark_node)
	{
	  templates = tmpl;
	  TREE_CHAIN (templates) = NULL_TREE;
	}
    }

  // Concepts allows multiple declarations of member functions
  // with the same signature.  Like above, we need to rely on
  // the partial ordering of those candidates to determine which
  // is the best.
  if (flag_concepts && candidates && TREE_CHAIN (candidates))
    {
      if (tree cand = most_constrained_function (candidates))
	{
	  candidates = cand;
	  TREE_CHAIN (cand) = NULL_TREE;
	}
    }

  if (templates == NULL_TREE && candidates == NULL_TREE)
    {
      error ("template-id %qD for %q+D does not match any template "
	     "declaration", template_id, decl);
      if (header_count && header_count != template_count + 1)
	inform (input_location, "saw %d %<template<>%>, need %d for "
		"specializing a member function template",
		header_count, template_count + 1);
      else
	print_candidates (orig_fns);
      return error_mark_node;
    }
  else if ((templates && TREE_CHAIN (templates))
	   || (candidates && TREE_CHAIN (candidates))
	   || (templates && candidates))
    {
      error ("ambiguous template specialization %qD for %q+D",
	     template_id, decl);
      candidates = chainon (candidates, templates);
      print_candidates (candidates);
      return error_mark_node;
    }

  /* We have one, and exactly one, match.  */
  if (candidates)
    {
      tree fn = TREE_VALUE (candidates);
      *targs_out = copy_node (DECL_TI_ARGS (fn));

      // Propagate the candidate's constraints to the declaration.
      set_constraints (decl, get_constraints (fn));

      /* DECL is a re-declaration or partial instantiation of a template
	 function.  */
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	return fn;
      /* It was a specialization of an ordinary member function in a
	 template class.  */
      return DECL_TI_TEMPLATE (fn);
    }

  /* It was a specialization of a template.
   */
  /* Exactly one TEMPLATES entry matched: hand back its deduced template
     arguments.  TREE_PURPOSE holds the innermost-level args; when the
     result carries multiple levels we splice them into a copy.  */
  targs = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (TREE_VALUE (templates)));
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (targs))
    {
      *targs_out = copy_node (targs);
      SET_TMPL_ARGS_LEVEL (*targs_out,
			   TMPL_ARGS_DEPTH (*targs_out),
			   TREE_PURPOSE (templates));
    }
  else
    *targs_out = TREE_PURPOSE (templates);
  return TREE_VALUE (templates);
}

/* Returns a chain of parameter types, exactly like the SPEC_TYPES,
   but with the default argument values filled in from those in the
   TMPL_TYPES.  Walks both lists in lockstep recursively; the two
   lists are assumed to have the same length.  */

static tree
copy_default_args_to_explicit_spec_1 (tree spec_types,
				      tree tmpl_types)
{
  tree new_spec_types;

  if (!spec_types)
    return NULL_TREE;

  if (spec_types == void_list_node)
    return void_list_node;

  /* Substitute into the rest of the list.  */
  new_spec_types =
    copy_default_args_to_explicit_spec_1 (TREE_CHAIN (spec_types),
					  TREE_CHAIN (tmpl_types));

  /* Add the default argument for this parameter.  TREE_PURPOSE of a
     parameter-type list node carries the default argument.  */
  return hash_tree_cons (TREE_PURPOSE (tmpl_types),
			 TREE_VALUE (spec_types),
			 new_spec_types);
}

/* DECL is an explicit specialization.  Replicate default arguments from
   the template it specializes.  (That way, code like:

     template <class T> void f(T = 3);
     template <> void f(double);
     void g () { f (); }

   works, as required.)  An alternative approach would be to look up the
   correct default arguments at the call-site, but this approach is
   consistent with how implicit instantiations are handled.  */

static void
copy_default_args_to_explicit_spec (tree decl)
{
  tree tmpl;
  tree spec_types;
  tree tmpl_types;
  tree new_spec_types;
  tree old_type;
  tree new_type;
  tree t;
  tree object_type = NULL_TREE;
  tree in_charge = NULL_TREE;
  tree vtt = NULL_TREE;

  /* See if there's anything we need to do.
   */
  /* Bail out early unless the template declares at least one default
     argument (TREE_PURPOSE non-null on some parameter node).  */
  tmpl = DECL_TI_TEMPLATE (decl);
  tmpl_types = TYPE_ARG_TYPES (TREE_TYPE (DECL_TEMPLATE_RESULT (tmpl)));
  for (t = tmpl_types; t; t = TREE_CHAIN (t))
    if (TREE_PURPOSE (t))
      break;
  if (!t)
    return;

  old_type = TREE_TYPE (decl);
  spec_types = TYPE_ARG_TYPES (old_type);

  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
    {
      /* Remove the this pointer, but remember the object's type for
	 CV quals.  */
      object_type = TREE_TYPE (TREE_VALUE (spec_types));
      spec_types = TREE_CHAIN (spec_types);
      tmpl_types = TREE_CHAIN (tmpl_types);

      if (DECL_HAS_IN_CHARGE_PARM_P (decl))
	{
	  /* DECL may contain more parameters than TMPL due to the extra
	     in-charge parameter in constructors and destructors.  */
	  in_charge = spec_types;
	  spec_types = TREE_CHAIN (spec_types);
	}
      if (DECL_HAS_VTT_PARM_P (decl))
	{
	  vtt = spec_types;
	  spec_types = TREE_CHAIN (spec_types);
	}
    }

  /* Compute the merged default arguments.  */
  new_spec_types =
    copy_default_args_to_explicit_spec_1 (spec_types, tmpl_types);

  /* Compute the new FUNCTION_TYPE.  The artificial parameters stripped
     above (vtt, then in-charge) are pushed back on in reverse order so
     the original parameter order is restored.  */
  if (object_type)
    {
      if (vtt)
	new_spec_types = hash_tree_cons (TREE_PURPOSE (vtt),
					 TREE_VALUE (vtt),
					 new_spec_types);

      if (in_charge)
	/* Put the in-charge parameter back.  */
	new_spec_types = hash_tree_cons (TREE_PURPOSE (in_charge),
					 TREE_VALUE (in_charge),
					 new_spec_types);

      new_type = build_method_type_directly (object_type,
					     TREE_TYPE (old_type),
					     new_spec_types);
    }
  else
    new_type = build_function_type (TREE_TYPE (old_type),
				    new_spec_types);
  /* Carry over attributes and the exception specification, which are
     not part of the bare FUNCTION_TYPE/METHOD_TYPE just rebuilt.  */
  new_type = cp_build_type_attribute_variant (new_type,
					      TYPE_ATTRIBUTES (old_type));
  new_type = build_exception_variant (new_type,
				      TYPE_RAISES_EXCEPTIONS (old_type));

  if (TYPE_HAS_LATE_RETURN_TYPE (old_type))
    TYPE_HAS_LATE_RETURN_TYPE (new_type) = 1;

  TREE_TYPE (decl) = new_type;
}

/* Return the number of template headers we expect to see for a definition
   or specialization of CTYPE or one of its non-template members.
   */

int
num_template_headers_for_class (tree ctype)
{
  int num_templates = 0;

  /* Walk outward through the enclosing class contexts, counting one
     header per primary class template on the way.  */
  while (ctype && CLASS_TYPE_P (ctype))
    {
      /* You're supposed to have one `template <...>' for every
	 template class, but you don't need one for a full
	 specialization.  For example:

	 template <class T> struct S{};
	 template <> struct S<int> { void f(); };
	 void S<int>::f () {}

	 is correct; there shouldn't be a `template <>' for the
	 definition of `S<int>::f'.  */
      if (!CLASSTYPE_TEMPLATE_INFO (ctype))
	/* If CTYPE does not have template information of any
	   kind, then it is not a template, nor is it nested
	   within a template.  */
	break;
      if (explicit_class_specialization_p (ctype))
	break;
      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (ctype)))
	++num_templates;

      ctype = TYPE_CONTEXT (ctype);
    }

  return num_templates;
}

/* Do a simple sanity check on the template headers that precede the
   variable declaration DECL.  Diagnoses an excess of headers; a
   variable template itself accounts for one extra expected header.  */

void
check_template_variable (tree decl)
{
  tree ctx = CP_DECL_CONTEXT (decl);
  int wanted = num_template_headers_for_class (ctx);
  if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      if (cxx_dialect < cxx14)
	pedwarn (DECL_SOURCE_LOCATION (decl), 0,
		 "variable templates only available with "
		 "-std=c++14 or -std=gnu++14");

      // Namespace-scope variable templates should have a template header.
      ++wanted;
    }
  if (template_header_count > wanted)
    {
      bool warned = pedwarn (DECL_SOURCE_LOCATION (decl), 0,
			     "too many template headers for %qD "
			     "(should be %d)",
			     decl, wanted);
      if (warned && CLASS_TYPE_P (ctx)
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (ctx))
	inform (DECL_SOURCE_LOCATION (decl),
		"members of an explicitly specialized class are defined "
		"without a template header");
    }
}

/* An explicit specialization whose declarator-id or class-head-name is not
   qualified shall be declared in the nearest enclosing namespace of the
   template, or, if the namespace is inline (7.3.1), any namespace from its
   enclosing namespace set.
   If the name declared in the explicit instantiation is an unqualified
   name, the explicit instantiation shall appear in the namespace where
   its template is declared or, if that namespace is inline (7.3.1), any
   namespace from its enclosing namespace set.  */

void
check_unqualified_spec_or_inst (tree t, location_t loc)
{
  tree tmpl = most_general_template (t);
  if (DECL_NAMESPACE_SCOPE_P (tmpl)
      && !is_nested_namespace (current_namespace,
			       CP_DECL_CONTEXT (tmpl), true))
    {
      if (processing_specialization)
	permerror (loc, "explicit specialization of %qD outside its "
		   "namespace must use a nested-name-specifier", tmpl);
      else if (processing_explicit_instantiation
	       && cxx_dialect >= cxx11)
	/* This was allowed in C++98, so only pedwarn.  */
	pedwarn (loc, OPT_Wpedantic, "explicit instantiation of %qD "
		 "outside its namespace must use a nested-name-"
		 "specifier", tmpl);
    }
}

/* Warn for a template specialization SPEC that is missing some of a set
   of function or type attributes that the template TEMPL is declared with.
   ATTRLIST is a list of additional attributes that SPEC should be taken
   to ultimately be declared with.  Only attributes in the "blacklist"
   below trigger the warning; attributes in the "whitelist" suppress it.  */

static void
warn_spec_missing_attributes (tree tmpl, tree spec, tree attrlist)
{
  if (DECL_FUNCTION_TEMPLATE_P (tmpl))
    tmpl = DECL_TEMPLATE_RESULT (tmpl);

  if (TREE_CODE (tmpl) != FUNCTION_DECL)
    return;

  /* Avoid warning if either declaration or its type is deprecated.  */
  if (TREE_DEPRECATED (tmpl)
      || TREE_DEPRECATED (spec))
    return;

  tree tmpl_type = TREE_TYPE (tmpl);
  tree spec_type = TREE_TYPE (spec);

  if (TREE_DEPRECATED (tmpl_type)
      || TREE_DEPRECATED (spec_type)
      || TREE_DEPRECATED (TREE_TYPE (tmpl_type))
      || TREE_DEPRECATED (TREE_TYPE (spec_type)))
    return;

  /* Index 0: attributes on the decl itself; index 1: on its type.  */
  tree tmpl_attrs[] = { DECL_ATTRIBUTES (tmpl), TYPE_ATTRIBUTES (tmpl_type) };
  tree spec_attrs[] = { DECL_ATTRIBUTES (spec), TYPE_ATTRIBUTES (spec_type) };

  /* Fold the extra ATTRLIST into whichever slot is free.  */
  if (!spec_attrs[0])
    spec_attrs[0] = attrlist;
  else if (!spec_attrs[1])
    spec_attrs[1] = attrlist;

  /* Avoid warning if the primary has no attributes.
   */
  if (!tmpl_attrs[0] && !tmpl_attrs[1])
    return;

  /* Avoid warning if either declaration contains an attribute on
     the white list below.  */
  const char* const whitelist[] = {
    "error", "warning"
  };

  for (unsigned i = 0; i != 2; ++i)
    for (unsigned j = 0; j != sizeof whitelist / sizeof *whitelist; ++j)
      if (lookup_attribute (whitelist[j], tmpl_attrs[i])
	  || lookup_attribute (whitelist[j], spec_attrs[i]))
	return;

  /* Avoid warning if the difference between the primary and
     the specialization is not in one of the attributes below.  */
  const char* const blacklist[] = {
    "alloc_align", "alloc_size", "assume_aligned", "format",
    "format_arg", "malloc", "nonnull"
  };

  /* Put together a list of the black listed attributes that the primary
     template is declared with that the specialization is not, in case
     it's not apparent from the most recent declaration of the primary.  */
  unsigned nattrs = 0;
  pretty_printer str;

  for (unsigned i = 0; i != sizeof blacklist / sizeof *blacklist; ++i)
    {
      for (unsigned j = 0; j != 2; ++j)
	{
	  if (!lookup_attribute (blacklist[i], tmpl_attrs[j]))
	    continue;

	  /* Scan the spec's attribute slots (only the second when it
	     is non-empty); append the attribute name once per slot
	     that lacks it.  */
	  for (unsigned k = 0; k != 1 + !!spec_attrs[1]; ++k)
	    {
	      if (lookup_attribute (blacklist[i], spec_attrs[k]))
		break;

	      if (nattrs)
		pp_string (&str, ", ");
	      pp_begin_quote (&str, pp_show_color (global_dc->printer));
	      pp_string (&str, blacklist[i]);
	      pp_end_quote (&str, pp_show_color (global_dc->printer));
	      ++nattrs;
	    }
	}
    }

  if (!nattrs)
    return;

  if (warning_at (DECL_SOURCE_LOCATION (spec), OPT_Wmissing_attributes,
		  "explicit specialization %q#D may be missing attributes",
		  spec))
    inform (DECL_SOURCE_LOCATION (tmpl),
	    nattrs > 1
	    ? G_("missing primary template attributes %s")
	    : G_("missing primary template attribute %s"),
	    pp_formatted_text (&str));
}

/* Check to see if the function just declared, as indicated in
   DECLARATOR, and in DECL, is a specialization of a function
   template.  We may also discover that the declaration is an explicit
   instantiation at this point.
Returns DECL, or an equivalent declaration that should be used instead if all goes well. Issues an error message if something is amiss. Returns error_mark_node if the error is not easily recoverable. FLAGS is a bitmask consisting of the following flags: 2: The function has a definition. 4: The function is a friend. The TEMPLATE_COUNT is the number of references to qualifying template classes that appeared in the name of the function. For example, in template <class T> struct S { void f(); }; void S<int>::f(); the TEMPLATE_COUNT would be 1. However, explicitly specialized classes are not counted in the TEMPLATE_COUNT, so that in template <class T> struct S {}; template <> struct S<int> { void f(); } template <> void S<int>::f(); the TEMPLATE_COUNT would be 0. (Note that this declaration is invalid; there should be no template <>.) If the function is a specialization, it is marked as such via DECL_TEMPLATE_SPECIALIZATION. Furthermore, its DECL_TEMPLATE_INFO is set up correctly, and it is added to the list of specializations for that template. 
   */

tree
check_explicit_specialization (tree declarator,
			       tree decl,
			       int template_count,
			       int flags,
			       tree attrlist)
{
  int have_def = flags & 2;	/* Bit 2: a definition is provided.  */
  int is_friend = flags & 4;	/* Bit 4: this is a friend declaration.  */
  bool is_concept = flags & 8;	/* Bit 8: declared with 'concept'.  */
  int specialization = 0;
  int explicit_instantiation = 0;
  int member_specialization = 0;
  tree ctype = DECL_CLASS_CONTEXT (decl);
  tree dname = DECL_NAME (decl);
  tmpl_spec_kind tsk;

  if (is_friend)
    {
      if (!processing_specialization)
	tsk = tsk_none;
      else
	tsk = tsk_excessive_parms;
    }
  else
    tsk = current_tmpl_spec_kind (template_count);

  /* Classify the declaration based on the template-header situation,
     setting at most one of the three flags above.  */
  switch (tsk)
    {
    case tsk_none:
      if (processing_specialization && !VAR_P (decl))
	{
	  specialization = 1;
	  SET_DECL_TEMPLATE_SPECIALIZATION (decl);
	}
      else if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR)
	{
	  if (is_friend)
	    /* This could be something like:

	       template <class T> void f(T);
	       class S { friend void f<>(int); }  */
	    specialization = 1;
	  else
	    {
	      /* This case handles bogus declarations like template <>
		 template <class T> void f<int>(); */

	      error ("template-id %qD in declaration of primary template",
		     declarator);
	      return decl;
	    }
	}
      break;

    case tsk_invalid_member_spec:
      /* The error has already been reported in
	 check_specialization_scope.  */
      return error_mark_node;

    case tsk_invalid_expl_inst:
      error ("template parameter list used in explicit instantiation");

      /* Fall through.  */

    case tsk_expl_inst:
      if (have_def)
	error ("definition provided for explicit instantiation");

      explicit_instantiation = 1;
      break;

    case tsk_excessive_parms:
    case tsk_insufficient_parms:
      if (tsk == tsk_excessive_parms)
	error ("too many template parameter lists in declaration of %qD",
	       decl);
      else if (template_header_count)
	error("too few template parameter lists in declaration of %qD",
	      decl);
      else
	error("explicit specialization of %qD must be introduced by "
	      "%<template <>%>", decl);

      /* Fall through.
       */
    case tsk_expl_spec:
      if (is_concept)
	error ("explicit specialization declared %<concept%>");

      if (VAR_P (decl) && TREE_CODE (declarator) != TEMPLATE_ID_EXPR)
	/* In cases like template<> constexpr bool v = true;
	   We'll give an error in check_template_variable.  */
	break;

      SET_DECL_TEMPLATE_SPECIALIZATION (decl);
      if (ctype)
	member_specialization = 1;
      else
	specialization = 1;
      break;

    case tsk_template:
      if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR)
	{
	  /* This case handles bogus declarations like template <>
	     template <class T> void f<int>(); */

	  if (!uses_template_parms (declarator))
	    error ("template-id %qD in declaration of primary template",
		   declarator);
	  else if (variable_template_p (TREE_OPERAND (declarator, 0)))
	    {
	      /* Partial specialization of variable template.  */
	      SET_DECL_TEMPLATE_SPECIALIZATION (decl);
	      specialization = 1;
	      goto ok;
	    }
	  else if (cxx_dialect < cxx14)
	    error ("non-type partial specialization %qD "
		   "is not allowed", declarator);
	  else
	    error ("non-class, non-variable partial specialization %qD "
		   "is not allowed", declarator);
	  return decl;
	ok:;
	}

      if (ctype && CLASSTYPE_TEMPLATE_INSTANTIATION (ctype))
	/* This is a specialization of a member template, without
	   specialization the containing class.  Something like:

	     template <class T> struct S {
	       template <class U> void f (U);
	     };
	     template <> template <class U> void S<int>::f(U) {}

	   That's a specialization -- but of the entire template.  */
	specialization = 1;
      break;

    default:
      gcc_unreachable ();
    }

  if ((specialization || member_specialization)
      /* This doesn't apply to variable templates.
       */
      && (TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE
	  || TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE))
    {
      /* A default argument is not permitted on an explicit function
	 specialization; diagnose the first one only.  */
      tree t = TYPE_ARG_TYPES (TREE_TYPE (decl));
      for (; t; t = TREE_CHAIN (t))
	if (TREE_PURPOSE (t))
	  {
	    permerror (input_location,
		       "default argument specified in explicit specialization");
	    break;
	  }
    }

  if (specialization || member_specialization || explicit_instantiation)
    {
      tree tmpl = NULL_TREE;
      tree targs = NULL_TREE;
      bool was_template_id = (TREE_CODE (declarator) == TEMPLATE_ID_EXPR);

      /* Make sure that the declarator is a TEMPLATE_ID_EXPR.  */
      if (!was_template_id)
	{
	  tree fns;

	  gcc_assert (identifier_p (declarator));
	  if (ctype)
	    fns = dname;
	  else
	    {
	      /* If there is no class context, the explicit instantiation
		 must be at namespace scope.  */
	      gcc_assert (DECL_NAMESPACE_SCOPE_P (decl));

	      /* Find the namespace binding, using the declaration
		 context.  */
	      fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
					   false, true);
	      if (fns == error_mark_node)
		/* If lookup fails, look for a friend declaration so we can
		   give a better diagnostic.  */
		fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
					     /*type*/false, /*complain*/true,
					     /*hidden*/true);

	      if (fns == error_mark_node || !is_overloaded_fn (fns))
		{
		  error ("%qD is not a template function", dname);
		  fns = error_mark_node;
		}
	    }

	  declarator = lookup_template_function (fns, NULL_TREE);
	}

      if (declarator == error_mark_node)
	return error_mark_node;

      if (ctype != NULL_TREE && TYPE_BEING_DEFINED (ctype))
	{
	  if (!explicit_instantiation)
	    /* A specialization in class scope.  This is invalid,
	       but the error will already have been flagged by
	       check_specialization_scope.  */
	    return error_mark_node;
	  else
	    {
	      /* It's not valid to write an explicit instantiation in
		 class scope, e.g.:

		   class C { template void f(); }

		 This case is caught by the parser.  However, on
		 something like:

		   template class C { void f(); };

		 (which is invalid) we can get here.  The error will be
		 issued later.
	       */
	      ;
	    }

	  return decl;
	}
      else if (ctype != NULL_TREE
	       && (identifier_p (TREE_OPERAND (declarator, 0))))
	{
	  // We'll match variable templates in start_decl.
	  if (VAR_P (decl))
	    return decl;

	  /* Find the list of functions in ctype that have the same
	     name as the declared function.  */
	  tree name = TREE_OPERAND (declarator, 0);

	  if (constructor_name_p (name, ctype))
	    {
	      if (DECL_CONSTRUCTOR_P (decl)
		  ? !TYPE_HAS_USER_CONSTRUCTOR (ctype)
		  : !CLASSTYPE_DESTRUCTOR (ctype))
		{
		  /* From [temp.expl.spec]:

		     If such an explicit specialization for the member
		     of a class template names an implicitly-declared
		     special member function (clause _special_), the
		     program is ill-formed.

		     Similar language is found in [temp.explicit].  */
		  error ("specialization of implicitly-declared special member function");
		  return error_mark_node;
		}

	      name = DECL_NAME (decl);
	    }

	  /* For a type-conversion operator, We might be looking for
	     `operator int' which will be a specialization of
	     `operator T'.  Grab all the conversion operators, and
	     then select from them.  */
	  tree fns = get_class_binding (ctype, IDENTIFIER_CONV_OP_P (name)
					? conv_op_identifier : name);

	  if (fns == NULL_TREE)
	    {
	      error ("no member function %qD declared in %qT", name, ctype);
	      return error_mark_node;
	    }
	  else
	    TREE_OPERAND (declarator, 0) = fns;
	}

      /* Figure out what exactly is being specialized at this point.
	 Note that for an explicit instantiation, even one for a
	 member function, we cannot tell a priori whether the
	 instantiation is for a member template, or just a member
	 function of a template class.  Even if a member template is
	 being instantiated, the member template arguments may be
	 elided if they can be deduced from the rest of the
	 declaration.  */
      tmpl = determine_specialization (declarator, decl,
				       &targs,
				       member_specialization,
				       template_count,
				       tsk);

      if (!tmpl || tmpl == error_mark_node)
	/* We couldn't figure out what this declaration was
	   specializing.
	 */
	return error_mark_node;
      else
	{
	  if (TREE_CODE (decl) == FUNCTION_DECL
	      && DECL_HIDDEN_FRIEND_P (tmpl))
	    {
	      if (pedwarn (DECL_SOURCE_LOCATION (decl), 0,
			   "friend declaration %qD is not visible to "
			   "explicit specialization", tmpl))
		inform (DECL_SOURCE_LOCATION (tmpl),
			"friend declaration here");
	    }
	  else if (!ctype && !is_friend
		   && CP_DECL_CONTEXT (decl) == current_namespace)
	    check_unqualified_spec_or_inst (tmpl, DECL_SOURCE_LOCATION (decl));

	  tree gen_tmpl = most_general_template (tmpl);

	  if (explicit_instantiation)
	    {
	      /* We don't set DECL_EXPLICIT_INSTANTIATION here; that
		 is done by do_decl_instantiation later.  */

	      int arg_depth = TMPL_ARGS_DEPTH (targs);
	      int parm_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));

	      if (arg_depth > parm_depth)
		{
		  /* If TMPL is not the most general template (for
		     example, if TMPL is a friend template that is
		     injected into namespace scope), then there will
		     be too many levels of TARGS.  Remove some of them
		     here.  */
		  int i;
		  tree new_targs;

		  new_targs = make_tree_vec (parm_depth);
		  for (i = arg_depth - parm_depth; i < arg_depth; ++i)
		    TREE_VEC_ELT (new_targs, i - (arg_depth - parm_depth))
		      = TREE_VEC_ELT (targs, i);
		  targs = new_targs;
		}

	      return instantiate_template (tmpl, targs, tf_error);
	    }

	  /* If we thought that the DECL was a member function, but it
	     turns out to be specializing a static member function,
	     make DECL a static member function as well.  */
	  if (DECL_FUNCTION_TEMPLATE_P (tmpl)
	      && DECL_STATIC_FUNCTION_P (tmpl)
	      && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    revert_static_member_fn (decl);

	  /* If this is a specialization of a member template of a
	     template class, we want to return the TEMPLATE_DECL, not
	     the specialization of it.
	   */
	  if (tsk == tsk_template && !was_template_id)
	    {
	      tree result = DECL_TEMPLATE_RESULT (tmpl);
	      SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
	      DECL_INITIAL (result) = NULL_TREE;
	      if (have_def)
		{
		  tree parm;
		  DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl);
		  DECL_SOURCE_LOCATION (result)
		    = DECL_SOURCE_LOCATION (decl);
		  /* We want to use the argument list specified in the
		     definition, not in the original declaration.  */
		  DECL_ARGUMENTS (result) = DECL_ARGUMENTS (decl);
		  for (parm = DECL_ARGUMENTS (result); parm;
		       parm = DECL_CHAIN (parm))
		    DECL_CONTEXT (parm) = result;
		}
	      return register_specialization (tmpl, gen_tmpl, targs,
					      is_friend, 0);
	    }

	  /* Set up the DECL_TEMPLATE_INFO for DECL.  */
	  DECL_TEMPLATE_INFO (decl) = build_template_info (tmpl, targs);

	  if (was_template_id)
	    TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl)) = true;

	  /* Inherit default function arguments from the template
	     DECL is specializing.  */
	  if (DECL_FUNCTION_TEMPLATE_P (tmpl))
	    copy_default_args_to_explicit_spec (decl);

	  /* This specialization has the same protection as the
	     template it specializes.  */
	  TREE_PRIVATE (decl) = TREE_PRIVATE (gen_tmpl);
	  TREE_PROTECTED (decl) = TREE_PROTECTED (gen_tmpl);

	  /* 7.1.1-1 [dcl.stc]

	     A storage-class-specifier shall not be specified in an
	     explicit specialization...

	     The parser rejects these, so unless action is taken here,
	     explicit function specializations will always appear with
	     global linkage.

	     The action recommended by the C++ CWG in response to C++
	     defect report 605 is to make the storage class and linkage
	     of the explicit specialization match the templated function:

	     http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#605
	   */
	  if (tsk == tsk_expl_spec && DECL_FUNCTION_TEMPLATE_P (gen_tmpl))
	    {
	      tree tmpl_func = DECL_TEMPLATE_RESULT (gen_tmpl);
	      gcc_assert (TREE_CODE (tmpl_func) == FUNCTION_DECL);

	      /* A concept cannot be specialized.
	       */
	      if (DECL_DECLARED_CONCEPT_P (tmpl_func))
		{
		  error ("explicit specialization of function concept %qD",
			 gen_tmpl);
		  return error_mark_node;
		}

	      /* This specialization has the same linkage and visibility as
		 the function template it specializes.  */
	      TREE_PUBLIC (decl) = TREE_PUBLIC (tmpl_func);
	      if (! TREE_PUBLIC (decl))
		{
		  DECL_INTERFACE_KNOWN (decl) = 1;
		  DECL_NOT_REALLY_EXTERN (decl) = 1;
		}
	      DECL_THIS_STATIC (decl) = DECL_THIS_STATIC (tmpl_func);
	      if (DECL_VISIBILITY_SPECIFIED (tmpl_func))
		{
		  DECL_VISIBILITY_SPECIFIED (decl) = 1;
		  DECL_VISIBILITY (decl) = DECL_VISIBILITY (tmpl_func);
		}
	    }

	  /* If DECL is a friend declaration, declared using an
	     unqualified name, the namespace associated with DECL may
	     have been set incorrectly.  For example, in:

	       template <typename T> void f(T);
	       namespace N {
		 struct S { friend void f<int>(int); }
	       }

	     we will have set the DECL_CONTEXT for the friend
	     declaration to N, rather than to the global namespace.  */
	  if (DECL_NAMESPACE_SCOPE_P (decl))
	    DECL_CONTEXT (decl) = DECL_CONTEXT (tmpl);

	  if (is_friend && !have_def)
	    /* This is not really a declaration of a specialization.
	       It's just the name of an instantiation.  But, it's not
	       a request for an instantiation, either.  */
	    SET_DECL_IMPLICIT_INSTANTIATION (decl);
	  else if (TREE_CODE (decl) == FUNCTION_DECL)
	    /* A specialization is not necessarily COMDAT.  */
	    DECL_COMDAT (decl) = (TREE_PUBLIC (decl)
				  && DECL_DECLARED_INLINE_P (decl));
	  else if (VAR_P (decl))
	    DECL_COMDAT (decl) = false;

	  /* If this is a full specialization, register it so that we can find
	     it again.  Partial specializations will be registered in
	     process_partial_specialization.  */
	  if (!processing_template_decl)
	    {
	      warn_spec_missing_attributes (gen_tmpl, decl, attrlist);

	      decl = register_specialization (decl, gen_tmpl, targs,
					      is_friend, 0);
	    }

	  /* A 'structor should already have clones.
	   */
	  gcc_assert (decl == error_mark_node
		      || variable_template_p (tmpl)
		      || !(DECL_CONSTRUCTOR_P (decl)
			   || DECL_DESTRUCTOR_P (decl))
		      || DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)));
	}
    }

  return decl;
}

/* Returns 1 iff PARMS1 and PARMS2 are identical sets of template
   parameters.  These are represented in the same format used for
   DECL_TEMPLATE_PARMS.  */

int
comp_template_parms (const_tree parms1, const_tree parms2)
{
  const_tree p1;
  const_tree p2;

  if (parms1 == parms2)
    return 1;

  /* Walk both parameter-level lists in lockstep; each level holds a
     TREE_VEC of parameters.  */
  for (p1 = parms1, p2 = parms2;
       p1 != NULL_TREE && p2 != NULL_TREE;
       p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2))
    {
      tree t1 = TREE_VALUE (p1);
      tree t2 = TREE_VALUE (p2);
      int i;

      gcc_assert (TREE_CODE (t1) == TREE_VEC);
      gcc_assert (TREE_CODE (t2) == TREE_VEC);

      if (TREE_VEC_LENGTH (t1) != TREE_VEC_LENGTH (t2))
	return 0;

      for (i = 0; i < TREE_VEC_LENGTH (t2); ++i)
	{
	  tree parm1 = TREE_VALUE (TREE_VEC_ELT (t1, i));
	  tree parm2 = TREE_VALUE (TREE_VEC_ELT (t2, i));

	  /* If either of the template parameters are invalid, assume
	     they match for the sake of error recovery.  */
	  if (error_operand_p (parm1) || error_operand_p (parm2))
	    return 1;

	  if (TREE_CODE (parm1) != TREE_CODE (parm2))
	    return 0;

	  /* Type parameters match when their pack-ness agrees;
	     otherwise fall through to comparing their types.  */
	  if (TREE_CODE (parm1) == TEMPLATE_TYPE_PARM
	      && (TEMPLATE_TYPE_PARAMETER_PACK (parm1)
		  == TEMPLATE_TYPE_PARAMETER_PACK (parm2)))
	    continue;
	  else if (!same_type_p (TREE_TYPE (parm1), TREE_TYPE (parm2)))
	    return 0;
	}
    }

  if ((p1 != NULL_TREE) != (p2 != NULL_TREE))
    /* One set of parameters has more parameters lists than the
       other.  */
    return 0;

  return 1;
}

/* Determine whether PARM is a parameter pack.  */

bool
template_parameter_pack_p (const_tree parm)
{
  /* Determine if we have a non-type template parameter pack.  */
  if (TREE_CODE (parm) == PARM_DECL)
    return (DECL_TEMPLATE_PARM_P (parm)
	    && TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)));
  if (TREE_CODE (parm) == TEMPLATE_PARM_INDEX)
    return TEMPLATE_PARM_PARAMETER_PACK (parm);

  /* If this is a list of template parameters, we could get a
     TYPE_DECL or a TEMPLATE_DECL.
   */
  if (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL)
    parm = TREE_TYPE (parm);

  /* Otherwise it must be a type template parameter.  */
  return ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM
	   || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM)
	  && TEMPLATE_TYPE_PARAMETER_PACK (parm));
}

/* Determine if T is a function parameter pack.  */

bool
function_parameter_pack_p (const_tree t)
{
  if (t && TREE_CODE (t) == PARM_DECL)
    return DECL_PACK_P (t);
  return false;
}

/* Return the function template declaration of PRIMARY_FUNC_TMPL_INST.
   PRIMARY_FUNC_TMPL_INST is a primary function template instantiation.
   Returns NULL when the argument is not such an instantiation.  */

tree
get_function_template_decl (const_tree primary_func_tmpl_inst)
{
  if (! primary_func_tmpl_inst
      || TREE_CODE (primary_func_tmpl_inst) != FUNCTION_DECL
      || ! primary_template_specialization_p (primary_func_tmpl_inst))
    return NULL;

  return DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (primary_func_tmpl_inst));
}

/* Return true iff the function parameter PARAM_DECL was expanded
   from the function parameter pack PACK.  */

bool
function_parameter_expanded_from_pack_p (tree param_decl, tree pack)
{
  if (DECL_ARTIFICIAL (param_decl)
      || !function_parameter_pack_p (pack))
    return false;

  /* The parameter pack and its pack arguments have the same
     DECL_PARM_INDEX.  */
  return DECL_PARM_INDEX (pack) == DECL_PARM_INDEX (param_decl);
}

/* Determine whether ARGS describes a variadic template args list,
   i.e., one that is terminated by a template argument pack.  */

static bool
template_args_variadic_p (tree args)
{
  int nargs;
  tree last_parm;

  if (args == NULL_TREE)
    return false;

  args = INNERMOST_TEMPLATE_ARGS (args);
  nargs = TREE_VEC_LENGTH (args);

  if (nargs == 0)
    return false;

  last_parm = TREE_VEC_ELT (args, nargs - 1);

  return ARGUMENT_PACK_P (last_parm);
}

/* Generate a new name for the parameter pack name NAME (an
   IDENTIFIER_NODE) that incorporates its index I, of the form
   "NAME#I".  */

static tree
make_ith_pack_parameter_name (tree name, int i)
{
  /* Munge the name to include the parameter index.
   */
#define NUMBUF_LEN 128
  char numbuf[NUMBUF_LEN];
  char* newname;
  int newname_len;

  if (name == NULL_TREE)
    return name;
  snprintf (numbuf, NUMBUF_LEN, "%i", i);
  /* +2: one byte for the '#' separator, one for the terminating NUL.  */
  newname_len = IDENTIFIER_LENGTH (name)
		+ strlen (numbuf) + 2;
  newname = (char*)alloca (newname_len);
  snprintf (newname, newname_len, "%s#%i", IDENTIFIER_POINTER (name), i);
  return get_identifier (newname);
}

/* Return true if T is a primary function, class or alias template
   specialization, not including the template pattern.  */

bool
primary_template_specialization_p (const_tree t)
{
  if (!t)
    return false;

  if (TREE_CODE (t) == FUNCTION_DECL || VAR_P (t))
    return (DECL_LANG_SPECIFIC (t)
	    && DECL_USE_TEMPLATE (t)
	    && DECL_TEMPLATE_INFO (t)
	    && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (t)));
  else if (CLASS_TYPE_P (t) && !TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
    return (CLASSTYPE_TEMPLATE_INFO (t)
	    && CLASSTYPE_USE_TEMPLATE (t)
	    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)));
  else if (alias_template_specialization_p (t))
    return true;

  return false;
}

/* Return true if PARM is a template template parameter.  */

bool
template_template_parameter_p (const_tree parm)
{
  return DECL_TEMPLATE_TEMPLATE_PARM_P (parm);
}

/* Return true iff PARM is a DECL representing a type template
   parameter.  */

bool
template_type_parameter_p (const_tree parm)
{
  return (parm
	  && (TREE_CODE (parm) == TYPE_DECL
	      || TREE_CODE (parm) == TEMPLATE_DECL)
	  && DECL_TEMPLATE_PARM_P (parm));
}

/* Return the template parameters of T if T is a
   primary template instantiation, NULL otherwise.  */

tree
get_primary_template_innermost_parameters (const_tree t)
{
  tree parms = NULL, template_info = NULL;

  if ((template_info = get_template_info (t))
      && primary_template_specialization_p (t))
    parms = INNERMOST_TEMPLATE_PARMS
	(DECL_TEMPLATE_PARMS (TI_TEMPLATE (template_info)));

  return parms;
}

/* Return the template parameters of the LEVELth level from the full list
   of template parameters PARMS.
   */

tree
get_template_parms_at_level (tree parms, int level)
{
  tree p;
  if (!parms
      || TREE_CODE (parms) != TREE_LIST
      || level > TMPL_PARMS_DEPTH (parms))
    return NULL_TREE;

  for (p = parms; p; p = TREE_CHAIN (p))
    if (TMPL_PARMS_DEPTH (p) == level)
      return p;

  return NULL_TREE;
}

/* Returns the template arguments of T if T is a template instantiation,
   NULL otherwise.  */

tree
get_template_innermost_arguments (const_tree t)
{
  tree args = NULL, template_info = NULL;

  if ((template_info = get_template_info (t))
      && TI_ARGS (template_info))
    args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (template_info));

  return args;
}

/* Return the argument pack elements of T if T is a template argument pack,
   NULL otherwise.  */

tree
get_template_argument_pack_elems (const_tree t)
{
  if (TREE_CODE (t) != TYPE_ARGUMENT_PACK
      && TREE_CODE (t) != NONTYPE_ARGUMENT_PACK)
    return NULL;

  return ARGUMENT_PACK_ARGS (t);
}

/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents.  */

static tree
argument_pack_select_arg (tree t)
{
  tree args = ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (t));
  tree arg = TREE_VEC_ELT (args, ARGUMENT_PACK_SELECT_INDEX (t));

  /* If the selected argument is an expansion E, that most likely means we were
     called from gen_elem_of_pack_expansion_instantiation during the
     substituting of an argument pack (of which the Ith element is a pack
     expansion, where I is ARGUMENT_PACK_SELECT_INDEX) into a pack expansion.
     In this case, the Ith element resulting from this substituting is going to
     be a pack expansion, which pattern is the pattern of E.  Let's return the
     pattern of E, and gen_elem_of_pack_expansion_instantiation will build the
     resulting pack expansion from it.  */
  if (PACK_EXPANSION_P (arg))
    {
      /* Make sure we aren't throwing away arg info.  */
      gcc_assert (!PACK_EXPANSION_EXTRA_ARGS (arg));
      arg = PACK_EXPANSION_PATTERN (arg);
    }

  return arg;
}

/* True iff FN is a function representing a built-in variadic parameter
   pack.
 */

bool
builtin_pack_fn_p (tree fn)
{
  if (!fn
      || TREE_CODE (fn) != FUNCTION_DECL
      || !DECL_IS_BUILTIN (fn))
    return false;

  /* Currently __integer_pack is the only such built-in.  */
  if (id_equal (DECL_NAME (fn), "__integer_pack"))
    return true;

  return false;
}

/* True iff CALL is a call to a function representing a built-in variadic
   parameter pack.  */

static bool
builtin_pack_call_p (tree call)
{
  if (TREE_CODE (call) != CALL_EXPR)
    return false;
  return builtin_pack_fn_p (CALL_EXPR_FN (call));
}

/* Return a TREE_VEC for the expansion of __integer_pack(HI), i.e. the
   integers 0, 1, ..., HI-1, or a single pack-expansion element while HI
   is still value-dependent.  */

static tree
expand_integer_pack (tree call, tree args, tsubst_flags_t complain,
		     tree in_decl)
{
  tree ohi = CALL_EXPR_ARG (call, 0);
  tree hi = tsubst_copy_and_build (ohi, args, complain, in_decl,
				   false/*fn*/, true/*int_cst*/);

  if (value_dependent_expression_p (hi))
    {
      /* HI is still dependent: re-wrap the (possibly updated) call in a
	 pack expansion to be expanded later.  */
      if (hi != ohi)
	{
	  call = copy_node (call);
	  CALL_EXPR_ARG (call, 0) = hi;
	}
      tree ex = make_pack_expansion (call, complain);
      tree vec = make_tree_vec (1);
      TREE_VEC_ELT (vec, 0) = ex;
      return vec;
    }
  else
    {
      hi = cxx_constant_value (hi);
      int len = valid_constant_size_p (hi) ? tree_to_shwi (hi) : -1;

      /* Calculate the largest value of len that won't make the size of the vec
	 overflow an int.  The compiler will exceed resource limits long before
	 this, but it seems a decent place to diagnose.  */
      int max = ((INT_MAX - sizeof (tree_vec)) / sizeof (tree)) + 1;

      if (len < 0 || len > max)
	{
	  if ((complain & tf_error)
	      && hi != error_mark_node)
	    error ("argument to __integer_pack must be between 0 and %d", max);
	  return error_mark_node;
	}

      tree vec = make_tree_vec (len);

      for (int i = 0; i < len; ++i)
	TREE_VEC_ELT (vec, i) = size_int (i);

      return vec;
    }
}

/* Return a TREE_VEC for the expansion of built-in template parameter pack
   CALL.
*/

static tree
expand_builtin_pack_call (tree call, tree args, tsubst_flags_t complain,
                          tree in_decl)
{
  if (!builtin_pack_call_p (call))
    return NULL_TREE;

  tree fn = CALL_EXPR_FN (call);

  if (id_equal (DECL_NAME (fn), "__integer_pack"))
    return expand_integer_pack (call, args, complain, in_decl);

  return NULL_TREE;
}

/* Structure used to track the progress of find_parameter_packs_r.  */
struct find_parameter_pack_data
{
  /* TREE_LIST that will contain all of the parameter packs found by
     the traversal.  */
  tree* parameter_packs;

  /* Set of AST nodes that have been visited by the traversal.  */
  hash_set<tree> *visited;

  /* True iff we're making a type pack expansion.  */
  bool type_pack_expansion_p;
};

/* Identifies all of the argument packs that occur in a template
   argument and appends them to the TREE_LIST inside DATA, which is a
   find_parameter_pack_data structure.  This is a subroutine of
   make_pack_expansion and uses_parameter_packs.

   Callback for cp_walk_tree: returning NULL_TREE continues the walk;
   setting *WALK_SUBTREES to 0 prunes the subtree.  */
static tree
find_parameter_packs_r (tree *tp, int *walk_subtrees, void* data)
{
  tree t = *tp;
  struct find_parameter_pack_data* ppd =
    (struct find_parameter_pack_data*)data;
  bool parameter_pack_p = false;

  /* Handle type aliases/typedefs.  */
  if (TYPE_ALIAS_P (t))
    {
      if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
        cp_walk_tree (&TI_ARGS (tinfo),
                      &find_parameter_packs_r,
                      ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Identify whether this is a parameter pack or not.  */
  switch (TREE_CODE (t))
    {
    case TEMPLATE_PARM_INDEX:
      if (TEMPLATE_PARM_PARAMETER_PACK (t))
        parameter_pack_p = true;
      break;

    case TEMPLATE_TYPE_PARM:
      t = TYPE_MAIN_VARIANT (t);
      /* FALLTHRU */
    case TEMPLATE_TEMPLATE_PARM:
      /* If the placeholder appears in the decl-specifier-seq of a function
         parameter pack (14.6.3), or the type-specifier-seq of a type-id that
         is a pack expansion, the invented template parameter is a template
         parameter pack.  */
      if (ppd->type_pack_expansion_p && is_auto (t))
        TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
        parameter_pack_p = true;
      break;

    case FIELD_DECL:
    case PARM_DECL:
      if (DECL_PACK_P (t))
        {
          /* We don't want to walk into the type of a PARM_DECL,
             because we don't want to see the type parameter pack.  */
          *walk_subtrees = 0;
          parameter_pack_p = true;
        }
      break;

    case VAR_DECL:
      if (DECL_PACK_P (t))
        {
          /* We don't want to walk into the type of a variadic capture proxy,
             because we don't want to see the type parameter pack.  */
          *walk_subtrees = 0;
          parameter_pack_p = true;
        }
      else if (variable_template_specialization_p (t))
        {
          cp_walk_tree (&DECL_TI_ARGS (t),
                        find_parameter_packs_r,
                        ppd, ppd->visited);
          *walk_subtrees = 0;
        }
      break;

    case CALL_EXPR:
      if (builtin_pack_call_p (t))
        parameter_pack_p = true;
      break;

    case BASES:
      parameter_pack_p = true;
      break;
    default:
      /* Not a parameter pack.  */
      break;
    }

  if (parameter_pack_p)
    {
      /* Add this parameter pack to the list.  */
      *ppd->parameter_packs = tree_cons (NULL_TREE, t, *ppd->parameter_packs);
    }

  if (TYPE_P (t))
    cp_walk_tree (&TYPE_CONTEXT (t),
                  &find_parameter_packs_r, ppd, ppd->visited);

  /* This switch statement will return immediately if we don't find a
     parameter pack.  */
  switch (TREE_CODE (t))
    {
    case TEMPLATE_PARM_INDEX:
      return NULL_TREE;

    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Check the template itself.  */
      cp_walk_tree (&TREE_TYPE (TYPE_TI_TEMPLATE (t)),
                    &find_parameter_packs_r, ppd, ppd->visited);
      /* Check the template arguments.  */
      cp_walk_tree (&TYPE_TI_ARGS (t), &find_parameter_packs_r, ppd,
                    ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
      return NULL_TREE;

    case PARM_DECL:
      return NULL_TREE;

    case DECL_EXPR:
      /* Ignore the declaration of a capture proxy for a parameter pack.  */
      if (is_capture_proxy (DECL_EXPR_DECL (t)))
        *walk_subtrees = 0;
      return NULL_TREE;

    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
        return NULL_TREE;
      /* Fall through.  */

    case UNION_TYPE:
    case ENUMERAL_TYPE:
      if (TYPE_TEMPLATE_INFO (t))
        cp_walk_tree (&TYPE_TI_ARGS (t),
                      &find_parameter_packs_r, ppd, ppd->visited);

      *walk_subtrees = 0;
      return NULL_TREE;

    case TEMPLATE_DECL:
      if (!DECL_TEMPLATE_TEMPLATE_PARM_P (t))
        return NULL_TREE;
      gcc_fallthrough();

    case CONSTRUCTOR:
      cp_walk_tree (&TREE_TYPE (t),
                    &find_parameter_packs_r, ppd, ppd->visited);
      return NULL_TREE;

    case TYPENAME_TYPE:
      cp_walk_tree (&TYPENAME_TYPE_FULLNAME (t), &find_parameter_packs_r,
                    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      /* Already-expanded packs: do not look inside.  */
      *walk_subtrees = 0;
      return NULL_TREE;

    case INTEGER_TYPE:
      cp_walk_tree (&TYPE_MAX_VALUE (t), &find_parameter_packs_r,
                    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case IDENTIFIER_NODE:
      cp_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd,
                    ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case LAMBDA_EXPR:
      {
        /* Look at explicit captures.  */
        for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t);
             cap; cap = TREE_CHAIN (cap))
          cp_walk_tree (&TREE_VALUE (cap), &find_parameter_packs_r, ppd,
                        ppd->visited);
        /* Since we defer implicit capture, look in the body as well.  */
        tree fn = lambda_function (t);
        cp_walk_tree (&DECL_SAVED_TREE (fn), &find_parameter_packs_r, ppd,
                      ppd->visited);
        *walk_subtrees = 0;
        return NULL_TREE;
      }

    case DECLTYPE_TYPE:
      {
        /* When traversing a DECLTYPE_TYPE_EXPR, we need to set
           type_pack_expansion_p to false so that any placeholders
           within the expression don't get marked as parameter packs.  */
        bool type_pack_expansion_p = ppd->type_pack_expansion_p;
        ppd->type_pack_expansion_p = false;
        cp_walk_tree (&DECLTYPE_TYPE_EXPR (t), &find_parameter_packs_r,
                      ppd, ppd->visited);
        ppd->type_pack_expansion_p = type_pack_expansion_p;
        *walk_subtrees = 0;
        return NULL_TREE;
      }

    default:
      return NULL_TREE;
    }

  return NULL_TREE;
}

/* Determines if the expression or type T uses any parameter packs.
*/

bool
uses_parameter_packs (tree t)
{
  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;
  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;
  return parameter_packs != NULL_TREE;
}

/* Turn ARG, which may be an expression, type, or a TREE_LIST
   representation a base-class initializer into a parameter pack
   expansion.  If all goes well, the resulting node will be an
   EXPR_PACK_EXPANSION, TYPE_PACK_EXPANSION, or TREE_LIST,
   respectively.  Returns error_mark_node (diagnosing under COMPLAIN)
   if no parameter packs occur in ARG.  */
tree
make_pack_expansion (tree arg, tsubst_flags_t complain)
{
  tree result;
  tree parameter_packs = NULL_TREE;
  bool for_types = false;
  struct find_parameter_pack_data ppd;

  if (!arg || arg == error_mark_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST && TREE_PURPOSE (arg))
    {
      /* A TREE_LIST with a non-null TREE_PURPOSE is for a base class
         initializer.  In this case, the TREE_PURPOSE will be a _TYPE
         node (representing the base class expansion we're initializing)
         and the TREE_VALUE will be a TREE_LIST containing the
         initialization arguments.

         The resulting expansion looks somewhat different from most
         expansions.  Rather than returning just one _EXPANSION, we
         return a TREE_LIST whose TREE_PURPOSE is a TYPE_PACK_EXPANSION
         containing the bases that will be initialized.  The TREE_VALUE
         will be identical to the original TREE_VALUE, which is a list
         of arguments that will be passed to each base.  We do not
         introduce any new pack expansion nodes into the TREE_VALUE
         (although it is possible that some already exist), because the
         TREE_PURPOSE and TREE_VALUE all need to be expanded together
         with the same _EXPANSION node.  Note that the
         TYPE_PACK_EXPANSION in the resulting TREE_PURPOSE will mention
         the parameter packs in both the bases and the arguments to the
         bases.  */
      tree purpose;
      tree value;
      tree parameter_packs = NULL_TREE;

      /* Determine which parameter packs will be used by the base class
         expansion.  */
      ppd.visited = new hash_set<tree>;
      ppd.parameter_packs = &parameter_packs;
      ppd.type_pack_expansion_p = true;
      gcc_assert (TYPE_P (TREE_PURPOSE (arg)));
      cp_walk_tree (&TREE_PURPOSE (arg), &find_parameter_packs_r,
                    &ppd, ppd.visited);

      if (parameter_packs == NULL_TREE)
        {
          if (complain & tf_error)
            error ("base initializer expansion %qT contains no parameter packs", arg);
          delete ppd.visited;
          return error_mark_node;
        }

      if (TREE_VALUE (arg) != void_type_node)
        {
          /* Collect the sets of parameter packs used in each of the
             initialization arguments.  */
          for (value = TREE_VALUE (arg); value; value = TREE_CHAIN (value))
            {
              /* Determine which parameter packs will be expanded in this
                 argument.  */
              cp_walk_tree (&TREE_VALUE (value), &find_parameter_packs_r,
                            &ppd, ppd.visited);
            }
        }

      delete ppd.visited;

      /* Create the pack expansion type for the base type.  */
      purpose = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (purpose, TREE_PURPOSE (arg));
      PACK_EXPANSION_PARAMETER_PACKS (purpose) = parameter_packs;
      PACK_EXPANSION_LOCAL_P (purpose) = at_function_scope_p ();

      /* Just use structural equality for these TYPE_PACK_EXPANSIONS;
         they will rarely be compared to anything.  */
      SET_TYPE_STRUCTURAL_EQUALITY (purpose);

      return tree_cons (purpose, TREE_VALUE (arg), NULL_TREE);
    }

  if (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL)
    for_types = true;

  /* Build the PACK_EXPANSION_* node.  */
  result = for_types
    ? cxx_make_type (TYPE_PACK_EXPANSION)
    : make_node (EXPR_PACK_EXPANSION);
  SET_PACK_EXPANSION_PATTERN (result, arg);
  if (TREE_CODE (result) == EXPR_PACK_EXPANSION)
    {
      /* Propagate type and const-expression information.  */
      TREE_TYPE (result) = TREE_TYPE (arg);
      TREE_CONSTANT (result) = TREE_CONSTANT (arg);
      /* Mark this read now, since the expansion might be length 0.  */
      mark_exp_read (arg);
    }
  else
    /* Just use structural equality for these TYPE_PACK_EXPANSIONS;
       they will rarely be compared to anything.  */
    SET_TYPE_STRUCTURAL_EQUALITY (result);

  /* Determine which parameter packs will be expanded.  */
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = TYPE_P (arg);
  cp_walk_tree (&arg, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  /* Make sure we found some parameter packs.  */
  if (parameter_packs == NULL_TREE)
    {
      if (complain & tf_error)
        {
          if (TYPE_P (arg))
            error ("expansion pattern %qT contains no argument packs", arg);
          else
            error ("expansion pattern %qE contains no argument packs", arg);
        }
      return error_mark_node;
    }
  PACK_EXPANSION_PARAMETER_PACKS (result) = parameter_packs;

  PACK_EXPANSION_LOCAL_P (result) = at_function_scope_p ();

  return result;
}

/* Checks T for any "bare" parameter packs, which have not yet been
   expanded, and issues an error if any are found.  This operation can
   only be done on full expressions or types (e.g., an expression
   statement, "if" condition, etc.), because we could have expressions like:

     foo(f(g(h(args)))...)

   where "args" is a parameter pack.  check_for_bare_parameter_packs
   should not be called for the subexpressions args, h(args),
   g(h(args)), or f(g(h(args))), because we would produce erroneous
   error messages.

   Returns TRUE and emits an error if there were bare parameter packs,
   returns FALSE otherwise.  */
bool
check_for_bare_parameter_packs (tree t)
{
  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;

  if (!processing_template_decl || !t || t == error_mark_node)
    return false;

  /* A lambda might use a parameter pack from the containing context.  */
  if (current_class_type && LAMBDA_TYPE_P (current_class_type)
      && CLASSTYPE_TEMPLATE_INFO (current_class_type))
    return false;

  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;
  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  if (parameter_packs)
    {
      /* Name each offending pack in a follow-up note.  */
      location_t loc = EXPR_LOC_OR_LOC (t, input_location);
      error_at (loc, "parameter packs not expanded with %<...%>:");
      while (parameter_packs)
        {
          tree pack = TREE_VALUE (parameter_packs);
          tree name = NULL_TREE;

          if (TREE_CODE (pack) == TEMPLATE_TYPE_PARM
              || TREE_CODE (pack) == TEMPLATE_TEMPLATE_PARM)
            name = TYPE_NAME (pack);
          else if (TREE_CODE (pack) == TEMPLATE_PARM_INDEX)
            name = DECL_NAME (TEMPLATE_PARM_DECL (pack));
          else if (TREE_CODE (pack) == CALL_EXPR)
            name = DECL_NAME (CALL_EXPR_FN (pack));
          else
            name = DECL_NAME (pack);

          if (name)
            inform (loc, " %qD", name);
          else
            inform (loc, " <anonymous>");

          parameter_packs = TREE_CHAIN (parameter_packs);
        }

      return true;
    }

  return false;
}

/* Expand any parameter packs that occur in the template arguments in
   ARGS.  */
tree
expand_template_argument_pack (tree args)
{
  if (args == error_mark_node)
    return error_mark_node;

  tree result_args = NULL_TREE;
  int in_arg, out_arg = 0, nargs = args ? TREE_VEC_LENGTH (args) : 0;
  int num_result_args = -1;
  int non_default_args_count = -1;

  /* First, determine if we need to expand anything, and the number of
     slots we'll need.  */
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (arg == NULL_TREE)
        return args;
      if (ARGUMENT_PACK_P (arg))
        {
          int num_packed = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg));
          if (num_result_args < 0)
            num_result_args = in_arg + num_packed;
          else
            num_result_args += num_packed;
        }
      else
        {
          if (num_result_args >= 0)
            num_result_args++;
        }
    }

  /* If no expansion is necessary, we're done.  */
  if (num_result_args < 0)
    return args;

  /* Expand arguments.
*/
  result_args = make_tree_vec (num_result_args);
  if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (args))
    non_default_args_count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args);
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (ARGUMENT_PACK_P (arg))
        {
          /* Splice the pack's elements into the result vector.  */
          tree packed = ARGUMENT_PACK_ARGS (arg);
          int i, num_packed = TREE_VEC_LENGTH (packed);
          for (i = 0; i < num_packed; ++i, ++out_arg)
            TREE_VEC_ELT (result_args, out_arg) = TREE_VEC_ELT(packed, i);
          if (non_default_args_count > 0)
            non_default_args_count += num_packed - 1;
        }
      else
        {
          TREE_VEC_ELT (result_args, out_arg) = arg;
          ++out_arg;
        }
    }
  if (non_default_args_count >= 0)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (result_args, non_default_args_count);
  return result_args;
}

/* Checks if DECL shadows a template parameter.

   [temp.local]: A template-parameter shall not be redeclared within its
   scope (including nested scopes).

   Emits an error and returns FALSE if DECL shadows a template
   parameter; returns TRUE otherwise (i.e. TRUE means the declaration
   is OK).  NOTE(review): an earlier version of this comment had the
   sense of the return value inverted; the code below has always
   returned false on the error paths.  */

bool
check_template_shadow (tree decl)
{
  tree olddecl;

  /* If we're not in a template, we can't possibly shadow a template
     parameter.  */
  if (!current_template_parms)
    return true;

  /* Figure out what we're shadowing.  */
  decl = OVL_FIRST (decl);
  olddecl = innermost_non_namespace_value (DECL_NAME (decl));

  /* If there's no previous binding for this name, we're not shadowing
     anything, let alone a template parameter.  */
  if (!olddecl)
    return true;

  /* If we're not shadowing a template parameter, we're done.  Note
     that OLDDECL might be an OVERLOAD (or perhaps even an ERROR_MARK),
     so we can't just blithely assume it to be a _DECL node.  */
  if (!DECL_P (olddecl) || !DECL_TEMPLATE_PARM_P (olddecl))
    return true;

  /* We check for decl != olddecl to avoid bogus errors for using a
     name inside a class.  We check TPFI to avoid duplicate errors for
     inline member templates.  */
  if (decl == olddecl
      || (DECL_TEMPLATE_PARM_P (decl)
          && TEMPLATE_PARMS_FOR_INLINE (current_template_parms)))
    return true;

  /* Don't complain about the injected class name, as we've already
     complained about the class itself.  */
  if (DECL_SELF_REFERENCE_P (decl))
    return false;

  if (DECL_TEMPLATE_PARM_P (decl))
    error ("declaration of template parameter %q+D shadows "
           "template parameter", decl);
  else
    error ("declaration of %q+#D shadows template parameter", decl);
  inform (DECL_SOURCE_LOCATION (olddecl),
          "template parameter %qD declared here", olddecl);
  return false;
}

/* Return a new TEMPLATE_PARM_INDEX with the indicated INDEX, LEVEL,
   ORIG_LEVEL, DECL, and TYPE.  */

static tree
build_template_parm_index (int index, int level, int orig_level,
                           tree decl, tree type)
{
  tree t = make_node (TEMPLATE_PARM_INDEX);
  TEMPLATE_PARM_IDX (t) = index;
  TEMPLATE_PARM_LEVEL (t) = level;
  TEMPLATE_PARM_ORIG_LEVEL (t) = orig_level;
  TEMPLATE_PARM_DECL (t) = decl;
  TREE_TYPE (t) = type;
  TREE_CONSTANT (t) = TREE_CONSTANT (decl);
  TREE_READONLY (t) = TREE_READONLY (decl);

  return t;
}

/* Find the canonical type parameter for the given template type
   parameter.  Returns the canonical type parameter, which may be TYPE
   if no such parameter existed.  */

static tree
canonical_type_parameter (tree type)
{
  tree list;
  int idx = TEMPLATE_TYPE_IDX (type);

  /* canonical_template_parms is a lazily-grown vector indexed by
     parameter index; each slot chains all distinct types seen.  */
  if (!canonical_template_parms)
    vec_alloc (canonical_template_parms, idx + 1);

  if (canonical_template_parms->length () <= (unsigned) idx)
    vec_safe_grow_cleared (canonical_template_parms, idx + 1);

  list = (*canonical_template_parms)[idx];
  while (list && !comptypes (type, TREE_VALUE (list), COMPARE_STRUCTURAL))
    list = TREE_CHAIN (list);

  if (list)
    return TREE_VALUE (list);
  else
    {
      /* TYPE becomes the canonical parameter for this index/type.  */
      (*canonical_template_parms)[idx]
        = tree_cons (NULL_TREE, type, (*canonical_template_parms)[idx]);
      return type;
    }
}

/* Return a TEMPLATE_PARM_INDEX, similar to INDEX, but whose
   TEMPLATE_PARM_LEVEL has been decreased by LEVELS.
   If such a TEMPLATE_PARM_INDEX already exists, it is returned;
   otherwise, a new one is created.  */

static tree
reduce_template_parm_level (tree index, tree type, int levels, tree args,
                            tsubst_flags_t complain)
{
  /* The reduced parm is cached in TEMPLATE_PARM_DESCENDANTS; rebuild
     it only if the cache is empty or does not match the requested
     level and type.  */
  if (TEMPLATE_PARM_DESCENDANTS (index) == NULL_TREE
      || (TEMPLATE_PARM_LEVEL (TEMPLATE_PARM_DESCENDANTS (index))
          != TEMPLATE_PARM_LEVEL (index) - levels)
      || !same_type_p (type, TREE_TYPE (TEMPLATE_PARM_DESCENDANTS (index))))
    {
      tree orig_decl = TEMPLATE_PARM_DECL (index);
      tree decl, t;

      decl = build_decl (DECL_SOURCE_LOCATION (orig_decl),
                         TREE_CODE (orig_decl), DECL_NAME (orig_decl), type);
      TREE_CONSTANT (decl) = TREE_CONSTANT (orig_decl);
      TREE_READONLY (decl) = TREE_READONLY (orig_decl);
      DECL_ARTIFICIAL (decl) = 1;
      SET_DECL_TEMPLATE_PARM_P (decl);

      t = build_template_parm_index (TEMPLATE_PARM_IDX (index),
                                     TEMPLATE_PARM_LEVEL (index) - levels,
                                     TEMPLATE_PARM_ORIG_LEVEL (index),
                                     decl, type);
      TEMPLATE_PARM_DESCENDANTS (index) = t;
      TEMPLATE_PARM_PARAMETER_PACK (t)
        = TEMPLATE_PARM_PARAMETER_PACK (index);

      /* Template template parameters need this.  */
      if (TREE_CODE (decl) == TEMPLATE_DECL)
        {
          DECL_TEMPLATE_RESULT (decl)
            = build_decl (DECL_SOURCE_LOCATION (decl),
                          TYPE_DECL, DECL_NAME (decl), type);
          DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (decl)) = true;
          DECL_TEMPLATE_PARMS (decl)
            = tsubst_template_parms (DECL_TEMPLATE_PARMS (orig_decl),
                                     args, complain);
        }
    }

  return TEMPLATE_PARM_DESCENDANTS (index);
}

/* Process information from new template parameter PARM and append it
   to the LIST being built.  This new parameter is a non-type parameter
   iff IS_NON_TYPE is true.  This new parameter is a parameter pack iff
   IS_PARAMETER_PACK is true.  The location of PARM is in PARM_LOC.
*/

tree
process_template_parm (tree list, location_t parm_loc, tree parm,
                       bool is_non_type, bool is_parameter_pack)
{
  tree decl = 0;
  int idx = 0;

  gcc_assert (TREE_CODE (parm) == TREE_LIST);
  tree defval = TREE_PURPOSE (parm);
  tree constr = TREE_TYPE (parm);

  if (list)
    {
      /* The new parameter's index is one past that of the last
         parameter already on LIST.  */
      tree p = tree_last (list);

      if (p && TREE_VALUE (p) != error_mark_node)
        {
          p = TREE_VALUE (p);
          if (TREE_CODE (p) == TYPE_DECL || TREE_CODE (p) == TEMPLATE_DECL)
            idx = TEMPLATE_TYPE_IDX (TREE_TYPE (p));
          else
            idx = TEMPLATE_PARM_IDX (DECL_INITIAL (p));
        }

      ++idx;
    }

  if (is_non_type)
    {
      parm = TREE_VALUE (parm);

      SET_DECL_TEMPLATE_PARM_P (parm);

      if (TREE_TYPE (parm) != error_mark_node)
        {
          /* [temp.param]

             The top-level cv-qualifiers on the template-parameter are
             ignored when determining its type.  */
          TREE_TYPE (parm) = TYPE_MAIN_VARIANT (TREE_TYPE (parm));
          if (invalid_nontype_parm_type_p (TREE_TYPE (parm), 1))
            TREE_TYPE (parm) = error_mark_node;
          else if (uses_parameter_packs (TREE_TYPE (parm))
                   && !is_parameter_pack
                   /* If we're in a nested template parameter list, the
                      template template parameter could be a parameter
                      pack.  */
                   && processing_template_parmlist == 1)
            {
              /* This template parameter is not a parameter pack, but it
                 should be.  Complain about "bare" parameter packs.  */
              check_for_bare_parameter_packs (TREE_TYPE (parm));

              /* Recover by calling this a parameter pack.  */
              is_parameter_pack = true;
            }
        }

      /* A template parameter is not modifiable.  */
      TREE_CONSTANT (parm) = 1;
      TREE_READONLY (parm) = 1;
      decl = build_decl (parm_loc,
                         CONST_DECL, DECL_NAME (parm), TREE_TYPE (parm));
      TREE_CONSTANT (decl) = 1;
      TREE_READONLY (decl) = 1;
      DECL_INITIAL (parm) = DECL_INITIAL (decl)
        = build_template_parm_index (idx, processing_template_decl,
                                     processing_template_decl,
                                     decl, TREE_TYPE (parm));

      TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))
        = is_parameter_pack;
    }
  else
    {
      tree t;
      parm = TREE_VALUE (TREE_VALUE (parm));

      if (parm && TREE_CODE (parm) == TEMPLATE_DECL)
        {
          t = cxx_make_type (TEMPLATE_TEMPLATE_PARM);
          /* This is for distinguishing between real templates and template
             template parameters */
          TREE_TYPE (parm) = t;
          TREE_TYPE (DECL_TEMPLATE_RESULT (parm)) = t;
          decl = parm;
        }
      else
        {
          t = cxx_make_type (TEMPLATE_TYPE_PARM);
          /* parm is either IDENTIFIER_NODE or NULL_TREE.  */
          decl = build_decl (parm_loc, TYPE_DECL, parm, t);
        }

      TYPE_NAME (t) = decl;
      TYPE_STUB_DECL (t) = decl;
      parm = decl;
      TEMPLATE_TYPE_PARM_INDEX (t)
        = build_template_parm_index (idx, processing_template_decl,
                                     processing_template_decl,
                                     decl, TREE_TYPE (parm));
      TEMPLATE_TYPE_PARAMETER_PACK (t) = is_parameter_pack;
      TYPE_CANONICAL (t) = canonical_type_parameter (t);
    }
  DECL_ARTIFICIAL (decl) = 1;
  SET_DECL_TEMPLATE_PARM_P (decl);

  /* Build requirements for the type/template parameter.
     This must be done after SET_DECL_TEMPLATE_PARM_P or
     process_template_parm could fail.  */
  tree reqs = finish_shorthand_constraint (parm, constr);

  pushdecl (decl);

  if (defval && TREE_CODE (defval) == OVERLOAD)
    lookup_keep (defval, true);

  /* Build the parameter node linking the parameter declaration,
     its default argument (if any), and its constraints (if any).  */
  parm = build_tree_list (defval, parm);
  TEMPLATE_PARM_CONSTRAINTS (parm) = reqs;
  return chainon (list, parm);
}

/* The end of a template parameter list has been reached.  Process the
   tree list into a parameter vector, converting each parameter into a
   more useful form.
   Type parameters are saved as IDENTIFIER_NODEs, and others as
   PARM_DECLs.  */

tree
end_template_parm_list (tree parms)
{
  int nparms;
  tree parm, next;
  tree saved_parmlist = make_tree_vec (list_length (parms));

  /* Pop the dummy parameter level and add the real one.  */
  current_template_parms = TREE_CHAIN (current_template_parms);

  current_template_parms
    = tree_cons (size_int (processing_template_decl),
                 saved_parmlist, current_template_parms);

  for (parm = parms, nparms = 0; parm; parm = next, nparms++)
    {
      next = TREE_CHAIN (parm);
      TREE_VEC_ELT (saved_parmlist, nparms) = parm;
      TREE_CHAIN (parm) = NULL_TREE;
    }

  --processing_template_parmlist;

  return saved_parmlist;
}

// Explicitly indicate the end of the template parameter list. We assume
// that the current template parameters have been constructed and/or
// managed explicitly, as when creating new template template parameters
// from a shorthand constraint.
void
end_template_parm_list ()
{
  --processing_template_parmlist;
}

/* end_template_decl is called after a template declaration is seen.  */

void
end_template_decl (void)
{
  reset_specialization ();

  if (! processing_template_decl)
    return;

  /* This matches the pushlevel in begin_template_parm_list.  */
  finish_scope ();

  --processing_template_decl;
  current_template_parms = TREE_CHAIN (current_template_parms);
}

/* Takes a TREE_LIST representing a template parameter and convert it
   into an argument suitable to be passed to the type substitution
   functions.  Note that If the TREE_LIST contains an error_mark node,
   the returned argument is error_mark_node.  */

tree
template_parm_to_arg (tree t)
{
  if (t == NULL_TREE
      || TREE_CODE (t) != TREE_LIST)
    return t;

  if (error_operand_p (TREE_VALUE (t)))
    return error_mark_node;

  t = TREE_VALUE (t);

  if (TREE_CODE (t) == TYPE_DECL
      || TREE_CODE (t) == TEMPLATE_DECL)
    {
      t = TREE_TYPE (t);

      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
        {
          /* Turn this argument into a TYPE_ARGUMENT_PACK with a
             single element, which expands T.
*/
          tree vec = make_tree_vec (1);
          if (CHECKING_P)
            SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));
          TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

          t = cxx_make_type (TYPE_ARGUMENT_PACK);
          SET_ARGUMENT_PACK_ARGS (t, vec);
        }
    }
  else
    {
      t = DECL_INITIAL (t);

      if (TEMPLATE_PARM_PARAMETER_PACK (t))
        {
          /* Turn this argument into a NONTYPE_ARGUMENT_PACK with a
             single element, which expands T.  */
          tree vec = make_tree_vec (1);
          if (CHECKING_P)
            SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

          t = convert_from_reference (t);
          TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

          t = make_node (NONTYPE_ARGUMENT_PACK);
          SET_ARGUMENT_PACK_ARGS (t, vec);
        }
      else
        t = convert_from_reference (t);
    }
  return t;
}

/* Given a single level of template parameters (a TREE_VEC), return it
   as a set of template arguments.  */

static tree
template_parms_level_to_args (tree parms)
{
  tree a = copy_node (parms);
  TREE_TYPE (a) = NULL_TREE;
  for (int i = TREE_VEC_LENGTH (a) - 1; i >= 0; --i)
    TREE_VEC_ELT (a, i) = template_parm_to_arg (TREE_VEC_ELT (a, i));

  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (a, TREE_VEC_LENGTH (a));

  return a;
}

/* Given a set of template parameters, return them as a set of template
   arguments.  The template parameters are represented as a TREE_VEC,
   in the form documented in cp-tree.h for template arguments.  */

static tree
template_parms_to_args (tree parms)
{
  tree header;
  tree args = NULL_TREE;
  int length = TMPL_PARMS_DEPTH (parms);
  int l = length;

  /* If there is only one level of template parameters, we do not
     create a TREE_VEC of TREE_VECs.  Instead, we return a single
     TREE_VEC containing the arguments.  */
  if (length > 1)
    args = make_tree_vec (length);

  for (header = parms; header; header = TREE_CHAIN (header))
    {
      tree a = template_parms_level_to_args (TREE_VALUE (header));

      if (length > 1)
        TREE_VEC_ELT (args, --l) = a;
      else
        args = a;
    }

  return args;
}

/* Within the declaration of a template, return the currently active
   template parameters as an argument TREE_VEC.  */

static tree
current_template_args (void)
{
  return template_parms_to_args (current_template_parms);
}

/* Update the declared TYPE by doing any lookups which were thought to
   be dependent, but are not now that we know the SCOPE of the
   declarator.  */

tree
maybe_update_decl_type (tree orig_type, tree scope)
{
  tree type = orig_type;

  if (type == NULL_TREE)
    return type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    type = TREE_TYPE (type);

  if (scope && TYPE_P (scope) && dependent_type_p (scope)
      && dependent_type_p (type)
      /* Don't bother building up the args in this case.  */
      && TREE_CODE (type) != TEMPLATE_TYPE_PARM)
    {
      /* tsubst in the args corresponding to the template parameters,
         including auto if present.  Most things will be unchanged, but
         make_typename_type and tsubst_qualified_id will resolve
         TYPENAME_TYPEs and SCOPE_REFs that were previously
         dependent.  */
      tree args = current_template_args ();
      tree auto_node = type_uses_auto (type);
      tree pushed;
      if (auto_node)
        {
          tree auto_vec = make_tree_vec (1);
          TREE_VEC_ELT (auto_vec, 0) = auto_node;
          args = add_to_template_args (args, auto_vec);
        }
      pushed = push_scope (scope);
      type = tsubst (type, args, tf_warning_or_error, NULL_TREE);
      if (pushed)
        pop_scope (scope);
    }

  if (type == error_mark_node)
    return orig_type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    {
      if (same_type_p (type, TREE_TYPE (orig_type)))
        type = orig_type;
      else
        type = TYPE_NAME (type);
    }
  return type;
}

/* Return a TEMPLATE_DECL corresponding to DECL, using the indicated
   template PARMS and constraints, CONSTR.  If MEMBER_TEMPLATE_P is
   true, the new template is a member template.
*/

tree
build_template_decl (tree decl, tree parms, bool member_template_p)
{
  tree tmpl = build_lang_decl (TEMPLATE_DECL, DECL_NAME (decl), NULL_TREE);
  SET_DECL_LANGUAGE (tmpl, DECL_LANGUAGE (decl));
  DECL_TEMPLATE_PARMS (tmpl) = parms;
  DECL_CONTEXT (tmpl) = DECL_CONTEXT (decl);
  DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl);
  DECL_MEMBER_TEMPLATE_P (tmpl) = member_template_p;

  return tmpl;
}

struct template_parm_data
{
  /* The level of the template parameters we are currently
     processing.  */
  int level;

  /* The index of the specialization argument we are currently
     processing.  */
  int current_arg;

  /* An array whose size is the number of template parameters.  The
     elements are nonzero if the parameter has been used in any one of
     the arguments processed so far.  */
  int* parms;

  /* An array whose size is the number of template arguments.  The
     elements are nonzero if the argument makes use of template
     parameters of this level.  */
  int* arg_uses_template_parms;
};

/* Subroutine of push_template_decl used to see if each template
   parameter in a partial specialization is used in the explicit
   argument list.  If T is of the LEVEL given in DATA (which is treated
   as a template_parm_data*), then DATA->PARMS is marked
   appropriately.  */

static int
mark_template_parm (tree t, void* data)
{
  int level;
  int idx;
  struct template_parm_data* tpd = (struct template_parm_data*) data;

  template_parm_level_and_index (t, &level, &idx);

  if (level == tpd->level)
    {
      tpd->parms[idx] = 1;
      tpd->arg_uses_template_parms[tpd->current_arg] = 1;
    }

  /* In C++17 the type of a non-type argument is a deduced context.  */
  if (cxx_dialect >= cxx17
      && TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    for_each_template_parm (TREE_TYPE (t),
                            &mark_template_parm,
                            data,
                            NULL,
                            /*include_nondeduced_p=*/false);

  /* Return zero so that for_each_template_parm will continue the
     traversal of the tree; we want to mark *every* template parm.  */
  return 0;
}

/* Process the partial specialization DECL.
*/

static tree
process_partial_specialization (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree tinfo = get_template_info (decl);
  tree maintmpl = TI_TEMPLATE (tinfo);
  tree specargs = TI_ARGS (tinfo);
  tree inner_args = INNERMOST_TEMPLATE_ARGS (specargs);
  tree main_inner_parms = DECL_INNERMOST_TEMPLATE_PARMS (maintmpl);
  tree inner_parms;
  tree inst;
  int nargs = TREE_VEC_LENGTH (inner_args);
  int ntparms;
  int i;
  bool did_error_intro = false;
  struct template_parm_data tpd;
  struct template_parm_data tpd2;

  gcc_assert (current_template_parms);

  /* A concept cannot be specialized.  */
  if (flag_concepts && variable_concept_p (maintmpl))
    {
      error ("specialization of variable concept %q#D", maintmpl);
      return error_mark_node;
    }

  inner_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
  ntparms = TREE_VEC_LENGTH (inner_parms);

  /* We check that each of the template parameters given in the
     partial specialization is used in the argument list to the
     specialization.  For example:

       template <class T> struct S;
       template <class T> struct S<T*>;

     The second declaration is OK because `T*' uses the template
     parameter T, whereas

       template <class T> struct S<int>;

     is no good.  Even trickier is:

       template <class T>
       struct S1
       {
	 template <class U>
	 struct S2;
	 template <class U>
	 struct S2<T>;
       };

     The S2<T> declaration is actually invalid; it is a
     full-specialization.  Of course,

	  template <class U>
	  struct S2<T (*)(U)>;

     or some such would have been OK.  */

  /* TPD accumulates, per template parameter of this partial
     specialization, whether the parameter was used in a deduced
     context in the specialization's argument list (see the
     "not deducible" diagnostic below).  */
  tpd.level = TMPL_PARMS_DEPTH (current_template_parms);
  tpd.parms = XALLOCAVEC (int, ntparms);
  memset (tpd.parms, 0, sizeof (int) * ntparms);

  tpd.arg_uses_template_parms = XALLOCAVEC (int, nargs);
  memset (tpd.arg_uses_template_parms, 0, sizeof (int) * nargs);
  for (i = 0; i < nargs; ++i)
    {
      tpd.current_arg = i;
      for_each_template_parm (TREE_VEC_ELT (inner_args, i),
			      &mark_template_parm,
			      &tpd,
			      NULL,
			      /*include_nondeduced_p=*/false);
    }
  for (i = 0; i < ntparms; ++i)
    if (tpd.parms[i] == 0)
      {
	/* One of the template parms was not used in a deduced context in the
	   specialization.  */
	if (!did_error_intro)
	  {
	    error ("template parameters not deducible in "
		   "partial specialization:");
	    did_error_intro = true;
	  }

	inform (input_location, " %qD",
		TREE_VALUE (TREE_VEC_ELT (inner_parms, i)));
      }

  if (did_error_intro)
    return error_mark_node;

  /* [temp.class.spec]

     The argument list of the specialization shall not be identical
     to the implicit argument list of the primary template.  */
  tree main_args
    = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (maintmpl)));
  if (comp_template_args (inner_args, INNERMOST_TEMPLATE_ARGS (main_args))
      && (!flag_concepts
	  || !strictly_subsumes (current_template_constraints (),
				 get_constraints (maintmpl))))
    {
      if (!flag_concepts)
	error ("partial specialization %q+D does not specialize "
	       "any template arguments; to define the primary template, "
	       "remove the template argument list", decl);
      else
	error ("partial specialization %q+D does not specialize any "
	       "template arguments and is not more constrained than "
	       "the primary template; to define the primary template, "
	       "remove the template argument list", decl);
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
    }

  /* A partial specialization that replaces multiple parameters of the
     primary template with a pack expansion is less specialized for those
     parameters.  */
  if (nargs < DECL_NTPARMS (maintmpl))
    {
      error ("partial specialization is not more specialized than the "
	     "primary template because it replaces multiple parameters "
	     "with a pack expansion");
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
      /* Avoid crash in process_partial_specialization.  */
      return decl;
    }

  /* If we aren't in a dependent class, we can actually try deduction.  */
  else if (tpd.level == 1
	   /* FIXME we should be able to handle a partial specialization of a
	      partial instantiation, but currently we can't (c++/41727).  */
	   && TMPL_ARGS_DEPTH (specargs) == 1
	   && !get_partial_spec_bindings (maintmpl, maintmpl, specargs))
    {
      if (permerror (input_location, "partial specialization %qD is not "
		     "more specialized than", decl))
	inform (DECL_SOURCE_LOCATION (maintmpl), "primary template %qD",
		maintmpl);
    }

  /* [temp.class.spec]

     A partially specialized non-type argument expression shall not
     involve template parameters of the partial specialization except
     when the argument expression is a simple identifier.

     The type of a template parameter corresponding to a specialized
     non-type argument shall not be dependent on a parameter of the
     specialization.

     Also, we verify that pack expansions only occur at the
     end of the argument list.  */
  gcc_assert (nargs == DECL_NTPARMS (maintmpl));
  tpd2.parms = 0;
  for (i = 0; i < nargs; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (main_inner_parms, i));
      tree arg = TREE_VEC_ELT (inner_args, i);
      tree packed_args = NULL_TREE;
      int j, len = 1;

      if (ARGUMENT_PACK_P (arg))
	{
	  /* Extract the arguments from the argument pack.  We'll be
	     iterating over these in the following loop.  */
	  packed_args = ARGUMENT_PACK_ARGS (arg);
	  len = TREE_VEC_LENGTH (packed_args);
	}

      for (j = 0; j < len; j++)
	{
	  if (packed_args)
	    /* Get the Jth argument in the parameter pack.  */
	    arg = TREE_VEC_ELT (packed_args, j);

	  if (PACK_EXPANSION_P (arg))
	    {
	      /* Pack expansions must come at the end of the
		 argument list.  */
	      if ((packed_args && j < len - 1)
		  || (!packed_args && i < nargs - 1))
		{
		  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
		    error ("parameter pack argument %qE must be at the "
			   "end of the template argument list", arg);
		  else
		    error ("parameter pack argument %qT must be at the "
			   "end of the template argument list", arg);
		}
	    }

	  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
	    /* We only care about the pattern.  */
	    arg = PACK_EXPANSION_PATTERN (arg);

	  if (/* These first two lines are the `non-type' bit.  */
	      !TYPE_P (arg)
	      && TREE_CODE (arg) != TEMPLATE_DECL
	      /* This next two lines are the `argument expression is
		 not just a simple identifier' condition and also
		 the `specialized non-type argument' bit.  */
	      && TREE_CODE (arg) != TEMPLATE_PARM_INDEX
	      && !(REFERENCE_REF_P (arg)
		   && TREE_CODE (TREE_OPERAND (arg, 0))
		      == TEMPLATE_PARM_INDEX))
	    {
	      if ((!packed_args && tpd.arg_uses_template_parms[i])
		  || (packed_args && uses_template_parms (arg)))
		error ("template argument %qE involves template parameter(s)",
		       arg);
	      else
		{
		  /* Look at the corresponding template parameter,
		     marking which template parameters its type depends
		     upon.  */
		  tree type = TREE_TYPE (parm);

		  if (!tpd2.parms)
		    {
		      /* We haven't yet initialized TPD2.  Do so now.  */
		      tpd2.arg_uses_template_parms = XALLOCAVEC (int, nargs);
		      /* The number of parameters here is the number in the
			 main template, which, as checked in the assertion
			 above, is NARGS.  */
		      tpd2.parms = XALLOCAVEC (int, nargs);
		      tpd2.level
			= TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (maintmpl));
		    }

		  /* Mark the template parameters.  But this time, we're
		     looking for the template parameters of the main
		     template, not in the specialization.  */
		  tpd2.current_arg = i;
		  tpd2.arg_uses_template_parms[i] = 0;
		  memset (tpd2.parms, 0, sizeof (int) * nargs);
		  for_each_template_parm (type,
					  &mark_template_parm,
					  &tpd2,
					  NULL,
					  /*include_nondeduced_p=*/false);

		  if (tpd2.arg_uses_template_parms [i])
		    {
		      /* The type depended on some template parameters.
			 If they are fully specialized in the
			 specialization, that's OK.  */
		      int j;
		      int count = 0;
		      for (j = 0; j < nargs; ++j)
			if (tpd2.parms[j] != 0
			    && tpd.arg_uses_template_parms [j])
			  ++count;
		      if (count != 0)
			error_n (input_location, count,
				 "type %qT of template argument %qE depends "
				 "on a template parameter",
				 "type %qT of template argument %qE depends "
				 "on template parameters",
				 type,
				 arg);
		    }
		}
	    }
	}
    }

  /* We should only get here once.  */
  if (TREE_CODE (decl) == TYPE_DECL)
    gcc_assert (!COMPLETE_TYPE_P (type));

  // Build the template decl.
  tree tmpl = build_template_decl (decl, current_template_parms,
				   DECL_MEMBER_TEMPLATE_P (maintmpl));
  TREE_TYPE (tmpl) = type;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
  DECL_TEMPLATE_INFO (tmpl) = build_template_info (maintmpl, specargs);
  DECL_PRIMARY_TEMPLATE (tmpl) = maintmpl;

  /* Give template template parms a DECL_CONTEXT of the template
     for which they are a parameter.  */
  for (i = 0; i < ntparms; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (inner_parms, i));
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	DECL_CONTEXT (parm) = tmpl;
    }

  if (VAR_P (decl))
    /* We didn't register this in check_explicit_specialization so we could
       wait until the constraints were set.  */
    decl = register_specialization (decl, maintmpl, specargs, false, 0);
  else
    associate_classtype_constraints (type);

  DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)
    = tree_cons (specargs, tmpl,
		 DECL_TEMPLATE_SPECIALIZATIONS (maintmpl));
  TREE_TYPE (DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)) = type;

  /* Walk instantiations of the primary template that already exist;
     a partial specialization arriving after such an instantiation (or
     ambiguating an earlier one) is diagnosed below.  */
  for (inst = DECL_TEMPLATE_INSTANTIATIONS (maintmpl); inst;
       inst = TREE_CHAIN (inst))
    {
      tree instance = TREE_VALUE (inst);
      if (TYPE_P (instance)
	  ? (COMPLETE_TYPE_P (instance)
	     && CLASSTYPE_IMPLICIT_INSTANTIATION (instance))
	  : DECL_TEMPLATE_INSTANTIATION (instance))
	{
	  tree spec = most_specialized_partial_spec (instance, tf_none);
	  tree inst_decl = (DECL_P (instance)
			    ? instance : TYPE_NAME (instance));
	  if (!spec)
	    /* OK */;
	  else if (spec == error_mark_node)
	    permerror (input_location,
		       "declaration of %qD ambiguates earlier template "
		       "instantiation for %qD", decl, inst_decl);
	  else if (TREE_VALUE (spec) == tmpl)
	    permerror (input_location,
		       "partial specialization of %qD after instantiation "
		       "of %qD", decl, inst_decl);
	}
    }

  return decl;
}

/* PARM is a template parameter of some form; return the
   corresponding TEMPLATE_PARM_INDEX.  */

static tree
get_template_parm_index (tree parm)
{
  if (TREE_CODE (parm) == PARM_DECL
      || TREE_CODE (parm) == CONST_DECL)
    parm = DECL_INITIAL (parm);
  else if (TREE_CODE (parm) == TYPE_DECL
	   || TREE_CODE (parm) == TEMPLATE_DECL)
    parm = TREE_TYPE (parm);
  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM)
    parm = TEMPLATE_TYPE_PARM_INDEX (parm);
  gcc_assert (TREE_CODE (parm) == TEMPLATE_PARM_INDEX);
  return parm;
}

/* Subroutine of fixed_parameter_pack_p below.  Look for any template
   parameter packs used by the template parameter PARM.  */

static void
fixed_parameter_pack_p_1 (tree parm, struct find_parameter_pack_data *ppd)
{
  /* A type parm can't refer to another parm.  */
  if (TREE_CODE (parm) == TYPE_DECL || parm == error_mark_node)
    return;
  else if (TREE_CODE (parm) == PARM_DECL)
    {
      /* Non-type parameter: scan its type for parameter packs.  */
      cp_walk_tree (&TREE_TYPE (parm), &find_parameter_packs_r, ppd,
		    ppd->visited);
      return;
    }

  gcc_assert (TREE_CODE (parm) == TEMPLATE_DECL);

  /* Template template parameter: recurse into its own template
     parameter list.  */
  tree vec = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (parm));
  for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i)
    fixed_parameter_pack_p_1 (TREE_VALUE (TREE_VEC_ELT (vec, i)), ppd);
}

/* PARM is a template parameter pack.  Return any parameter packs used in
   its type or the type of any of its template parameters.  If there are
   any such packs, it will be instantiated into a fixed template parameter
   list by partial instantiation rather than be fully deduced.
*/

tree
fixed_parameter_pack_p (tree parm)
{
  /* This can only be true in a member template.  */
  if (TEMPLATE_PARM_ORIG_LEVEL (get_template_parm_index (parm)) < 2)
    return NULL_TREE;
  /* This can only be true for a parameter pack.  */
  if (!template_parameter_pack_p (parm))
    return NULL_TREE;
  /* A type parm can't refer to another parm.  */
  if (TREE_CODE (parm) == TYPE_DECL)
    return NULL_TREE;

  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;

  fixed_parameter_pack_p_1 (parm, &ppd);

  delete ppd.visited;
  return parameter_packs;
}

/* Check that a template declaration's use of default arguments and
   parameter packs is not invalid.  Here, PARMS are the template
   parameters.  IS_PRIMARY is true if DECL is the thing declared by
   a primary template.  IS_PARTIAL is true if DECL is a partial
   specialization.

   IS_FRIEND_DECL is nonzero if DECL is either a non-defining friend
   function template declaration or a friend class template
   declaration.  In the function case, 1 indicates a declaration, 2
   indicates a redeclaration.  When IS_FRIEND_DECL=2, no errors are
   emitted for extraneous default arguments.

   Returns TRUE if there were no errors found, FALSE otherwise.  */

bool
check_default_tmpl_args (tree decl, tree parms, bool is_primary,
			 bool is_partial, int is_friend_decl)
{
  const char *msg;
  int last_level_to_check;
  tree parm_level;
  bool no_errors = true;

  /* [temp.param]

     A default template-argument shall not be specified in a
     function template declaration or a function template
     definition, nor in the template-parameter-list of the
     definition of a member of a class template.  */

  if (TREE_CODE (CP_DECL_CONTEXT (decl)) == FUNCTION_DECL
      || (TREE_CODE (decl) == FUNCTION_DECL && DECL_LOCAL_FUNCTION_P (decl)))
    /* You can't have a function template declaration in a local
       scope, nor can you define a member of a class template in a
       local scope.  */
    return true;

  if ((TREE_CODE (decl) == TYPE_DECL
       && TREE_TYPE (decl)
       && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && LAMBDA_FUNCTION_P (decl)))
    /* A lambda doesn't have an explicit declaration; don't complain
       about the parms of the enclosing class.  */
    return true;

  if (current_class_type
      && !TYPE_BEING_DEFINED (current_class_type)
      && DECL_LANG_SPECIFIC (decl)
      && DECL_DECLARES_FUNCTION_P (decl)
      /* If this is either a friend defined in the scope of the class
	 or a member function.  */
      && (DECL_FUNCTION_MEMBER_P (decl)
	  ? same_type_p (DECL_CONTEXT (decl), current_class_type)
	  : DECL_FRIEND_CONTEXT (decl)
	  ? same_type_p (DECL_FRIEND_CONTEXT (decl), current_class_type)
	  : false)
      /* And, if it was a member function, it really was defined in
	 the scope of the class.  */
      && (!DECL_FUNCTION_MEMBER_P (decl)
	  || DECL_INITIALIZED_IN_CLASS_P (decl)))
    /* We already checked these parameters when the template was
       declared, so there's no need to do it again now.  This function
       was defined in class scope, but we're processing its body now
       that the class is complete.  */
    return true;

  /* Core issue 226 (C++0x only): the following only applies to class
     templates.  */
  if (is_primary
      && ((cxx_dialect == cxx98) || TREE_CODE (decl) != FUNCTION_DECL))
    {
      /* [temp.param]

	 If a template-parameter has a default template-argument, all
	 subsequent template-parameters shall have a default
	 template-argument supplied.  */
      for (parm_level = parms; parm_level;
	   parm_level = TREE_CHAIN (parm_level))
	{
	  tree inner_parms = TREE_VALUE (parm_level);
	  int ntparms = TREE_VEC_LENGTH (inner_parms);
	  int seen_def_arg_p = 0;
	  int i;

	  for (i = 0; i < ntparms; ++i)
	    {
	      tree parm = TREE_VEC_ELT (inner_parms, i);

	      if (parm == error_mark_node)
		continue;

	      if (TREE_PURPOSE (parm))
		seen_def_arg_p = 1;
	      else if (seen_def_arg_p
		       && !template_parameter_pack_p (TREE_VALUE (parm)))
		{
		  error ("no default argument for %qD", TREE_VALUE (parm));
		  /* For better subsequent error-recovery, we indicate that
		     there should have been a default argument.  */
		  TREE_PURPOSE (parm) = error_mark_node;
		  no_errors = false;
		}
	      else if (!is_partial
		       && !is_friend_decl
		       /* Don't complain about an enclosing partial
			  specialization.  */
		       && parm_level == parms
		       && TREE_CODE (decl) == TYPE_DECL
		       && i < ntparms - 1
		       && template_parameter_pack_p (TREE_VALUE (parm))
		       /* A fixed parameter pack will be partially
			  instantiated into a fixed length list.  */
		       && !fixed_parameter_pack_p (TREE_VALUE (parm)))
		{
		  /* A primary class template can only have one
		     parameter pack, at the end of the template
		     parameter list.  */
		  error ("parameter pack %q+D must be at the end of the"
			 " template parameter list", TREE_VALUE (parm));
		  TREE_VALUE (TREE_VEC_ELT (inner_parms, i))
		    = error_mark_node;
		  no_errors = false;
		}
	    }
	}
    }

  if (((cxx_dialect == cxx98) && TREE_CODE (decl) != TYPE_DECL)
      || is_partial
      || !is_primary
      || is_friend_decl)
    /* For an ordinary class template, default template arguments are
       allowed at the innermost level, e.g.:
	 template <class T = int>
	 struct S {};
       but, in a partial specialization, they're not allowed even
       there, as we have in [temp.class.spec]:

	 The template parameter list of a specialization shall not
	 contain default template argument values.

       So, for a partial specialization, or for a function template
       (in C++98/C++03), we look at all of them.  */
    ;
  else
    /* But, for a primary class template that is not a partial
       specialization we look at all template parameters except the
       innermost ones.  */
    parms = TREE_CHAIN (parms);

  /* Figure out what error message to issue.  */
  if (is_friend_decl == 2)
    msg = G_("default template arguments may not be used in function template "
	     "friend re-declaration");
  else if (is_friend_decl)
    msg = G_("default template arguments may not be used in template "
	     "friend declarations");
  else if (TREE_CODE (decl) == FUNCTION_DECL && (cxx_dialect == cxx98))
    msg = G_("default template arguments may not be used in function templates "
	     "without -std=c++11 or -std=gnu++11");
  else if (is_partial)
    msg = G_("default template arguments may not be used in "
	     "partial specializations");
  else if (current_class_type && CLASSTYPE_IS_TEMPLATE (current_class_type))
    msg = G_("default argument for template parameter for class enclosing %qD");
  else
    /* Per [temp.param]/9, "A default template-argument shall not be
       specified in the template-parameter-lists of the definition of
       a member of a class template that appears outside of the member's
       class.", thus if we aren't handling a member of a class template
       there is no need to examine the parameters.  */
    return true;

  if (current_class_type && TYPE_BEING_DEFINED (current_class_type))
    /* If we're inside a class definition, there's no need to
       examine the parameters to the class itself.  On the one
       hand, they will be checked when the class is defined, and,
       on the other, default arguments are valid in things like:
	 template <class T = double>
	 struct S { template <class U> void f(U); };
       Here the default argument for `S' has no bearing on the
       declaration of `f'.  */
    last_level_to_check = template_class_depth (current_class_type) + 1;
  else
    /* Check everything.  */
    last_level_to_check = 0;

  for (parm_level = parms;
       parm_level && TMPL_PARMS_DEPTH (parm_level) >= last_level_to_check;
       parm_level = TREE_CHAIN (parm_level))
    {
      tree inner_parms = TREE_VALUE (parm_level);
      int i;
      int ntparms;

      ntparms = TREE_VEC_LENGTH (inner_parms);
      for (i = 0; i < ntparms; ++i)
	{
	  if (TREE_VEC_ELT (inner_parms, i) == error_mark_node)
	    continue;

	  if (TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)))
	    {
	      if (msg)
		{
		  no_errors = false;
		  if (is_friend_decl == 2)
		    return no_errors;

		  error (msg, decl);
		  msg = 0;
		}

	      /* Clear out the default argument so that we are not
		 confused later.  */
	      TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)) = NULL_TREE;
	    }
	}

      /* At this point, if we're still interested in issuing messages,
	 they must apply to classes surrounding the object declared.  */
      if (msg)
	msg = G_("default argument for template parameter for class "
		 "enclosing %qD");
    }

  return no_errors;
}

/* Worker for push_template_decl_real, called via
   for_each_template_parm.  DATA is really an int, indicating the
   level of the parameters we are interested in.  If T is a template
   parameter of that level, return nonzero.  */

static int
template_parm_this_level_p (tree t, void* data)
{
  int this_level = *(int *)data;
  int level;

  if (TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    level = TEMPLATE_PARM_LEVEL (t);
  else
    level = TEMPLATE_TYPE_LEVEL (t);
  return level == this_level;
}

/* Worker for uses_outer_template_parms, called via
   for_each_template_parm.  DATA is really an int, indicating the
   innermost outer level of parameters.  If T is a template parameter
   of that level or further out, return nonzero.  */

static int
template_parm_outer_level (tree t, void *data)
{
  int this_level = *(int *)data;
  int level;

  if (TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    level = TEMPLATE_PARM_LEVEL (t);
  else
    level = TEMPLATE_TYPE_LEVEL (t);
  return level <= this_level;
}

/* Creates a TEMPLATE_DECL for the indicated DECL using the template
   parameters given by current_template_args, or reuses a
   previously existing one, if appropriate.
   Returns the DECL, or an equivalent one, if it is replaced via a
   call to duplicate_decls.

   If IS_FRIEND is true, DECL is a friend declaration.  */

tree
push_template_decl_real (tree decl, bool is_friend)
{
  tree tmpl;
  tree args;
  tree info;
  tree ctx;
  bool is_primary;
  bool is_partial;
  int new_template_p = 0;
  /* True if the template is a member template, in the sense of
     [temp.mem].  */
  bool member_template_p = false;

  if (decl == error_mark_node || !current_template_parms)
    return error_mark_node;

  /* See if this is a partial specialization.  */
  is_partial = ((DECL_IMPLICIT_TYPEDEF_P (decl)
		 && TREE_CODE (TREE_TYPE (decl)) != ENUMERAL_TYPE
		 && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
		|| (VAR_P (decl)
		    && DECL_LANG_SPECIFIC (decl)
		    && DECL_TEMPLATE_SPECIALIZATION (decl)
		    && TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl))));

  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_FRIEND_P (decl))
    is_friend = true;

  if (is_friend)
    /* For a friend, we want the context of the friend, not
       the type of which it is a friend.  */
    ctx = CP_DECL_CONTEXT (decl);
  else if (CP_DECL_CONTEXT (decl)
	   && TREE_CODE (CP_DECL_CONTEXT (decl)) != NAMESPACE_DECL)
    /* In the case of a virtual function, we want the class in which
       it is defined.  */
    ctx = CP_DECL_CONTEXT (decl);
  else
    /* Otherwise, if we're currently defining some class, the DECL
       is assumed to be a member of the class.  */
    ctx = current_scope ();

  if (ctx && TREE_CODE (ctx) == NAMESPACE_DECL)
    ctx = NULL_TREE;

  if (!DECL_CONTEXT (decl))
    DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);

  /* See if this is a primary template.  */
  if (is_friend && ctx
      && uses_template_parms_level (ctx, processing_template_decl))
    /* A friend template that specifies a class context, i.e.
	 template <typename T> friend void A<T>::f();
       is not primary.  */
    is_primary = false;
  else if (TREE_CODE (decl) == TYPE_DECL
	   && LAMBDA_TYPE_P (TREE_TYPE (decl)))
    is_primary = false;
  else
    is_primary = template_parm_scope_p ();

  if (is_primary)
    {
      warning (OPT_Wtemplates, "template %qD declared", decl);

      if (DECL_CLASS_SCOPE_P (decl))
	member_template_p = true;

      /* Reject declarations that cannot be templates at all.  */
      if (TREE_CODE (decl) == TYPE_DECL
	  && anon_aggrname_p (DECL_NAME (decl)))
	{
	  error ("template class without a name");
	  return error_mark_node;
	}
      else if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  if (member_template_p)
	    {
	      if (DECL_OVERRIDE_P (decl) || DECL_FINAL_P (decl))
		error ("member template %qD may not have virt-specifiers",
		       decl);
	    }
	  if (DECL_DESTRUCTOR_P (decl))
	    {
	      /* [temp.mem]

		 A destructor shall not be a member template.  */
	      error ("destructor %qD declared as member template", decl);
	      return error_mark_node;
	    }
	  if (IDENTIFIER_NEWDEL_OP_P (DECL_NAME (decl))
	      && (!prototype_p (TREE_TYPE (decl))
		  || TYPE_ARG_TYPES (TREE_TYPE (decl)) == void_list_node
		  || !TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		  || (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		      == void_list_node)))
	    {
	      /* [basic.stc.dynamic.allocation]

		 An allocation function can be a function
		 template.  ... Template allocation functions shall
		 have two or more parameters.  */
	      error ("invalid template declaration of %qD", decl);
	      return error_mark_node;
	    }
	}
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  /* Class template, set TEMPLATE_TYPE_PARM_FOR_CLASS.  */
	  tree parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
	  for (int i = 0; i < TREE_VEC_LENGTH (parms); ++i)
	    {
	      tree t = TREE_VALUE (TREE_VEC_ELT (parms, i));
	      if (TREE_CODE (t) == TYPE_DECL)
		t = TREE_TYPE (t);
	      if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
		TEMPLATE_TYPE_PARM_FOR_CLASS (t) = true;
	    }
	}
      else if (TREE_CODE (decl) == TYPE_DECL
	       && TYPE_DECL_ALIAS_P (decl))
	/* alias-declaration */
	gcc_assert (!DECL_ARTIFICIAL (decl));
      else if (VAR_P (decl))
	/* C++14 variable template.  */;
      else
	{
	  error ("template declaration of %q#D", decl);
	  return error_mark_node;
	}
    }

  /* Check to see that the rules regarding the use of default
     arguments are not being violated.  We check args for a friend
     functions when we know whether it's a definition, introducing
     declaration or re-declaration.  */
  if (!is_friend || TREE_CODE (decl) != FUNCTION_DECL)
    check_default_tmpl_args (decl, current_template_parms,
			     is_primary, is_partial, is_friend);

  /* Ensure that there are no parameter packs in the type of this
     declaration that have not been expanded.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Check each of the arguments individually to see if there are
	 any bare parameter packs.  */
      tree type = TREE_TYPE (decl);
      tree arg = DECL_ARGUMENTS (decl);
      tree argtype = TYPE_ARG_TYPES (type);

      while (arg && argtype)
	{
	  if (!DECL_PACK_P (arg)
	      && check_for_bare_parameter_packs (TREE_TYPE (arg)))
	    {
	      /* This is a PARM_DECL that contains unexpanded parameter
		 packs.  We have already complained about this in the
		 check_for_bare_parameter_packs call, so just replace
		 these types with ERROR_MARK_NODE.  */
	      TREE_TYPE (arg) = error_mark_node;
	      TREE_VALUE (argtype) = error_mark_node;
	    }

	  arg = DECL_CHAIN (arg);
	  argtype = TREE_CHAIN (argtype);
	}

      /* Check for bare parameter packs in the return type and the
	 exception specifiers.  */
      if (check_for_bare_parameter_packs (TREE_TYPE (type)))
	/* Errors were already issued, set return type to int
	   as the frontend doesn't expect error_mark_node as
	   the return type.  */
	TREE_TYPE (type) = integer_type_node;

      if (check_for_bare_parameter_packs (TYPE_RAISES_EXCEPTIONS (type)))
	TYPE_RAISES_EXCEPTIONS (type) = NULL_TREE;
    }
  else if (check_for_bare_parameter_packs ((TREE_CODE (decl) == TYPE_DECL
					    && TYPE_DECL_ALIAS_P (decl))
					   ? DECL_ORIGINAL_TYPE (decl)
					   : TREE_TYPE (decl)))
    {
      TREE_TYPE (decl) = error_mark_node;
      return error_mark_node;
    }

  if (is_partial)
    return process_partial_specialization (decl);

  args = current_template_args ();

  if (!ctx
      || TREE_CODE (ctx) == FUNCTION_DECL
      || (CLASS_TYPE_P (ctx) && TYPE_BEING_DEFINED (ctx))
      || (TREE_CODE (decl) == TYPE_DECL
	  && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (is_friend && !DECL_TEMPLATE_INFO (decl)))
    {
      if (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INFO (decl)
	  && DECL_TI_TEMPLATE (decl))
	tmpl = DECL_TI_TEMPLATE (decl);
      /* If DECL is a TYPE_DECL for a class-template, then there won't
	 be DECL_LANG_SPECIFIC.  The information equivalent to
	 DECL_TEMPLATE_INFO is found in TYPE_TEMPLATE_INFO instead.  */
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && TYPE_TEMPLATE_INFO (TREE_TYPE (decl))
	       && TYPE_TI_TEMPLATE (TREE_TYPE (decl)))
	{
	  /* Since a template declaration already existed for this
	     class-type, we must be redeclaring it here.  Make sure
	     that the redeclaration is valid.  */
	  redeclare_class_template (TREE_TYPE (decl),
				    current_template_parms,
				    current_template_constraints ());
	  /* We don't need to create a new TEMPLATE_DECL; just use the
	     one we already had.  */
	  tmpl = TYPE_TI_TEMPLATE (TREE_TYPE (decl));
	}
      else
	{
	  tmpl = build_template_decl (decl, current_template_parms,
				      member_template_p);
	  new_template_p = 1;

	  if (DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_SPECIALIZATION (decl))
	    {
	      /* A specialization of a member template of a template
		 class.  */
	      SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
	      DECL_TEMPLATE_INFO (tmpl) = DECL_TEMPLATE_INFO (decl);
	      DECL_TEMPLATE_INFO (decl) = NULL_TREE;
	    }
	}
    }
  else
    {
      tree a, t, current, parms;
      int i;
      tree tinfo = get_template_info (decl);

      if (!tinfo)
	{
	  error ("template definition of non-template %q#D", decl);
	  return error_mark_node;
	}

      tmpl = TI_TEMPLATE (tinfo);

      if (DECL_FUNCTION_TEMPLATE_P (tmpl)
	  && DECL_TEMPLATE_INFO (decl) && DECL_TI_ARGS (decl)
	  && DECL_TEMPLATE_SPECIALIZATION (decl)
	  && DECL_MEMBER_TEMPLATE_P (tmpl))
	{
	  tree new_tmpl;

	  /* The declaration is a specialization of a member
	     template, declared outside the class.  Therefore, the
	     innermost template arguments will be NULL, so we
	     replace them with the arguments determined by the
	     earlier call to check_explicit_specialization.  */
	  args = DECL_TI_ARGS (decl);

	  new_tmpl
	    = build_template_decl (decl, current_template_parms,
				   member_template_p);
	  DECL_TEMPLATE_RESULT (new_tmpl) = decl;
	  TREE_TYPE (new_tmpl) = TREE_TYPE (decl);
	  DECL_TI_TEMPLATE (decl) = new_tmpl;
	  SET_DECL_TEMPLATE_SPECIALIZATION (new_tmpl);
	  DECL_TEMPLATE_INFO (new_tmpl)
	    = build_template_info (tmpl, args);

	  register_specialization (new_tmpl,
				   most_general_template (tmpl),
				   args,
				   is_friend, 0);
	  return decl;
	}

      /* Make sure the template headers we got make sense.  */
      parms = DECL_TEMPLATE_PARMS (tmpl);
      i = TMPL_PARMS_DEPTH (parms);
      if (TMPL_ARGS_DEPTH (args) != i)
	{
	  error ("expected %d levels of template parms for %q#D, got %d",
		 i, decl, TMPL_ARGS_DEPTH (args));
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
      else
	for (current = decl; i > 0; --i, parms = TREE_CHAIN (parms))
	  {
	    a = TMPL_ARGS_LEVEL (args, i);
	    t = INNERMOST_TEMPLATE_PARMS (parms);

	    if (TREE_VEC_LENGTH (t) != TREE_VEC_LENGTH (a))
	      {
		if (current == decl)
		  error ("got %d template parameters for %q#D",
			 TREE_VEC_LENGTH (a), decl);
		else
		  error ("got %d template parameters for %q#T",
			 TREE_VEC_LENGTH (a), current);
		error (" but %d required", TREE_VEC_LENGTH (t));
		/* Avoid crash in import_export_decl.  */
		DECL_INTERFACE_KNOWN (decl) = 1;
		return error_mark_node;
	      }

	    if (current == decl)
	      current = ctx;
	    else if (current == NULL_TREE)
	      /* Can happen in erroneous input.  */
	      break;
	    else
	      current = get_containing_scope (current);
	  }

      /* Check that the parms are used in the appropriate qualifying scopes
	 in the declarator.  */
      if (!comp_template_args
	  (TI_ARGS (tinfo),
	   TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl)))))
	{
	  error ("template arguments to %qD do not match original "
		 "template %qD", decl, DECL_TEMPLATE_RESULT (tmpl));
	  if (!uses_template_parms (TI_ARGS (tinfo)))
	    inform (input_location, "use %<template<>%> for"
		    " an explicit specialization");
	  /* Avoid crash in import_export_decl.  */
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
    }

  DECL_TEMPLATE_RESULT (tmpl) = decl;
  TREE_TYPE (tmpl) = TREE_TYPE (decl);

  /* Push template declarations for global functions and types.  Note
     that we do not try to push a global template friend declared in a
     template class; such a thing may well depend on the template
     parameters of the class.  */
  if (new_template_p && !ctx
      && !(is_friend && template_class_depth (current_class_type) > 0))
    {
      tmpl = pushdecl_namespace_level (tmpl, is_friend);
      if (tmpl == error_mark_node)
	return error_mark_node;

      /* Hide template friend classes that haven't been declared yet.  */
      if (is_friend && TREE_CODE (decl) == TYPE_DECL)
	{
	  DECL_ANTICIPATED (tmpl) = 1;
	  DECL_FRIEND_P (tmpl) = 1;
	}
    }

  if (is_primary)
    {
      tree parms = DECL_TEMPLATE_PARMS (tmpl);

      DECL_PRIMARY_TEMPLATE (tmpl) = tmpl;

      /* Give template template parms a DECL_CONTEXT of the template
	 for which they are a parameter.  */
      parms = INNERMOST_TEMPLATE_PARMS (parms);
      for (int i = TREE_VEC_LENGTH (parms) - 1; i >= 0; --i)
	{
	  tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
	  if (TREE_CODE (parm) == TEMPLATE_DECL)
	    DECL_CONTEXT (parm) = tmpl;
	}

      if (TREE_CODE (decl) == TYPE_DECL
	  && TYPE_DECL_ALIAS_P (decl)
	  && complex_alias_template_p (tmpl))
	TEMPLATE_DECL_COMPLEX_ALIAS_P (tmpl) = true;
    }

  /* The DECL_TI_ARGS of DECL contains full set of arguments referring
     back to its most general template.  If TMPL is a specialization,
     ARGS may only have the innermost set of arguments.  Add the missing
     argument levels if necessary.  */
  if (DECL_TEMPLATE_INFO (tmpl))
    args = add_outermost_template_args (DECL_TI_ARGS (tmpl), args);

  info = build_template_info (tmpl, args);

  if (DECL_IMPLICIT_TYPEDEF_P (decl))
    SET_TYPE_TEMPLATE_INFO (TREE_TYPE (tmpl), info);
  else
    {
      if (is_primary)
	retrofit_lang_decl (decl);
      if (DECL_LANG_SPECIFIC (decl))
	DECL_TEMPLATE_INFO (decl) = info;
    }

  if (flag_implicit_templates
      && !is_friend
      && TREE_PUBLIC (decl)
      && VAR_OR_FUNCTION_DECL_P (decl))
    /* Set DECL_COMDAT on template instantiations; if we force
       them to be emitted by explicit instantiation or -frepo,
       mark_needed will tell cgraph to do the right thing.  */
    DECL_COMDAT (decl) = true;

  return DECL_TEMPLATE_RESULT (tmpl);
}

/* Entry point for non-friend declarations; see push_template_decl_real.  */

tree
push_template_decl (tree decl)
{
  return push_template_decl_real (decl, false);
}

/* FN is an inheriting constructor that inherits from the constructor
   template INHERITED; turn FN into a constructor template with a matching
   template header.
*/ tree add_inherited_template_parms (tree fn, tree inherited) { tree inner_parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (inherited)); inner_parms = copy_node (inner_parms); tree parms = tree_cons (size_int (processing_template_decl + 1), inner_parms, current_template_parms); tree tmpl = build_template_decl (fn, parms, /*member*/true); tree args = template_parms_to_args (parms); DECL_TEMPLATE_INFO (fn) = build_template_info (tmpl, args); TREE_TYPE (tmpl) = TREE_TYPE (fn); DECL_TEMPLATE_RESULT (tmpl) = fn; DECL_ARTIFICIAL (tmpl) = true; DECL_PRIMARY_TEMPLATE (tmpl) = tmpl; return tmpl; } /* Called when a class template TYPE is redeclared with the indicated template PARMS, e.g.: template <class T> struct S; template <class T> struct S {}; */ bool redeclare_class_template (tree type, tree parms, tree cons) { tree tmpl; tree tmpl_parms; int i; if (!TYPE_TEMPLATE_INFO (type)) { error ("%qT is not a template type", type); return false; } tmpl = TYPE_TI_TEMPLATE (type); if (!PRIMARY_TEMPLATE_P (tmpl)) /* The type is nested in some template class. Nothing to worry about here; there are no new template parameters for the nested type. 
*/ return true; if (!parms) { error ("template specifiers not specified in declaration of %qD", tmpl); return false; } parms = INNERMOST_TEMPLATE_PARMS (parms); tmpl_parms = DECL_INNERMOST_TEMPLATE_PARMS (tmpl); if (TREE_VEC_LENGTH (parms) != TREE_VEC_LENGTH (tmpl_parms)) { error_n (input_location, TREE_VEC_LENGTH (parms), "redeclared with %d template parameter", "redeclared with %d template parameters", TREE_VEC_LENGTH (parms)); inform_n (DECL_SOURCE_LOCATION (tmpl), TREE_VEC_LENGTH (tmpl_parms), "previous declaration %qD used %d template parameter", "previous declaration %qD used %d template parameters", tmpl, TREE_VEC_LENGTH (tmpl_parms)); return false; } for (i = 0; i < TREE_VEC_LENGTH (tmpl_parms); ++i) { tree tmpl_parm; tree parm; tree tmpl_default; tree parm_default; if (TREE_VEC_ELT (tmpl_parms, i) == error_mark_node || TREE_VEC_ELT (parms, i) == error_mark_node) continue; tmpl_parm = TREE_VALUE (TREE_VEC_ELT (tmpl_parms, i)); if (error_operand_p (tmpl_parm)) return false; parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); tmpl_default = TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)); parm_default = TREE_PURPOSE (TREE_VEC_ELT (parms, i)); /* TMPL_PARM and PARM can be either TYPE_DECL, PARM_DECL, or TEMPLATE_DECL. */ if (TREE_CODE (tmpl_parm) != TREE_CODE (parm) || (TREE_CODE (tmpl_parm) != TYPE_DECL && !same_type_p (TREE_TYPE (tmpl_parm), TREE_TYPE (parm))) || (TREE_CODE (tmpl_parm) != PARM_DECL && (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (tmpl_parm)) != TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))) || (TREE_CODE (tmpl_parm) == PARM_DECL && (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (tmpl_parm)) != TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))))) { error ("template parameter %q+#D", tmpl_parm); error ("redeclared here as %q#D", parm); return false; } if (tmpl_default != NULL_TREE && parm_default != NULL_TREE) { /* We have in [temp.param]: A template-parameter may not be given default arguments by two different declarations in the same scope. 
*/ error_at (input_location, "redefinition of default argument for %q#D", parm); inform (DECL_SOURCE_LOCATION (tmpl_parm), "original definition appeared here"); return false; } if (parm_default != NULL_TREE) /* Update the previous template parameters (which are the ones that will really count) with the new default value. */ TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)) = parm_default; else if (tmpl_default != NULL_TREE) /* Update the new parameters, too; they'll be used as the parameters for any members. */ TREE_PURPOSE (TREE_VEC_ELT (parms, i)) = tmpl_default; /* Give each template template parm in this redeclaration a DECL_CONTEXT of the template for which they are a parameter. */ if (TREE_CODE (parm) == TEMPLATE_DECL) { gcc_assert (DECL_CONTEXT (parm) == NULL_TREE); DECL_CONTEXT (parm) = tmpl; } if (TREE_CODE (parm) == TYPE_DECL) TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (parm)) = true; } // Cannot redeclare a class template with a different set of constraints. if (!equivalent_constraints (get_constraints (tmpl), cons)) { error_at (input_location, "redeclaration %q#D with different " "constraints", tmpl); inform (DECL_SOURCE_LOCATION (tmpl), "original declaration appeared here"); } return true; } /* The actual substitution part of instantiate_non_dependent_expr_sfinae, to be used when the caller has already checked (processing_template_decl && !instantiation_dependent_expression_p (expr) && potential_constant_expression (expr)) and cleared processing_template_decl. */ tree instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain) { return tsubst_copy_and_build (expr, /*args=*/NULL_TREE, complain, /*in_decl=*/NULL_TREE, /*function_p=*/false, /*integral_constant_expression_p=*/true); } /* Simplify EXPR if it is a non-dependent expression. Returns the (possibly simplified) expression. 
*/ tree instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain) { if (expr == NULL_TREE) return NULL_TREE; /* If we're in a template, but EXPR isn't value dependent, simplify it. We're supposed to treat: template <typename T> void f(T[1 + 1]); template <typename T> void f(T[2]); as two declarations of the same function, for example. */ if (processing_template_decl && is_nondependent_constant_expression (expr)) { processing_template_decl_sentinel s; expr = instantiate_non_dependent_expr_internal (expr, complain); } return expr; } tree instantiate_non_dependent_expr (tree expr) { return instantiate_non_dependent_expr_sfinae (expr, tf_error); } /* Like instantiate_non_dependent_expr, but return NULL_TREE rather than an uninstantiated expression. */ tree instantiate_non_dependent_or_null (tree expr) { if (expr == NULL_TREE) return NULL_TREE; if (processing_template_decl) { if (!is_nondependent_constant_expression (expr)) expr = NULL_TREE; else { processing_template_decl_sentinel s; expr = instantiate_non_dependent_expr_internal (expr, tf_error); } } return expr; } /* True iff T is a specialization of a variable template. */ bool variable_template_specialization_p (tree t) { if (!VAR_P (t) || !DECL_LANG_SPECIFIC (t) || !DECL_TEMPLATE_INFO (t)) return false; tree tmpl = DECL_TI_TEMPLATE (t); return variable_template_p (tmpl); } /* Return TRUE iff T is a type alias, a TEMPLATE_DECL for an alias template declaration, or a TYPE_DECL for an alias declaration. */ bool alias_type_or_template_p (tree t) { if (t == NULL_TREE) return false; return ((TREE_CODE (t) == TYPE_DECL && TYPE_DECL_ALIAS_P (t)) || (TYPE_P (t) && TYPE_NAME (t) && TYPE_DECL_ALIAS_P (TYPE_NAME (t))) || DECL_ALIAS_TEMPLATE_P (t)); } /* Return TRUE iff T is a specialization of an alias template. */ bool alias_template_specialization_p (const_tree t) { /* It's an alias template specialization if it's an alias and its TYPE_NAME is a specialization of a primary template. 
*/
  if (TYPE_ALIAS_P (t))
    if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
      return PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo));
  return false;
}

/* An alias template is complex from a SFINAE perspective if a template-id
   using that alias can be ill-formed when the expansion is not, as with
   the void_t template.  We determine this by checking whether the
   expansion for the alias template uses all its template parameters.  */

struct uses_all_template_parms_data
{
  int level;    /* The template parameter level being scanned for.  */
  bool *seen;   /* seen[i] is set once parameter I at that level is found.  */
};

/* Callback for for_each_template_parm: record that parameter T was seen.  */

static int
uses_all_template_parms_r (tree t, void *data_)
{
  struct uses_all_template_parms_data &data
    = *(struct uses_all_template_parms_data*)data_;
  tree idx = get_template_parm_index (t);

  if (TEMPLATE_PARM_LEVEL (idx) == data.level)
    data.seen[TEMPLATE_PARM_IDX (idx)] = true;

  /* Returning 0 keeps the walk going over the whole pattern.  */
  return 0;
}

/* Return TRUE iff the expansion of alias template TMPL fails to use
   at least one of its innermost template parameters.  */

static bool
complex_alias_template_p (const_tree tmpl)
{
  struct uses_all_template_parms_data data;
  tree pat = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  tree parms = DECL_TEMPLATE_PARMS (tmpl);
  data.level = TMPL_PARMS_DEPTH (parms);
  int len = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (parms));
  /* Stack-allocated "seen" bitmap, one flag per innermost parameter.  */
  data.seen = XALLOCAVEC (bool, len);
  for (int i = 0; i < len; ++i)
    data.seen[i] = false;

  for_each_template_parm (pat, uses_all_template_parms_r, &data, NULL, true);
  for (int i = 0; i < len; ++i)
    if (!data.seen[i])
      return true;
  return false;
}

/* Return TRUE iff T is a specialization of a complex alias template with
   dependent template-arguments.  */

bool
dependent_alias_template_spec_p (const_tree t)
{
  if (!alias_template_specialization_p (t))
    return false;

  tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t);
  if (!TEMPLATE_DECL_COMPLEX_ALIAS_P (TI_TEMPLATE (tinfo)))
    return false;

  tree args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo));
  if (!any_dependent_template_arguments_p (args))
    return false;

  return true;
}

/* Return the number of innermost template parameters in TMPL.
*/

static int
num_innermost_template_parms (tree tmpl)
{
  tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl));
  return TREE_VEC_LENGTH (parms);
}

/* Return either TMPL or another template that it is equivalent to under
   DR 1286: An alias that just changes the name of a template is equivalent
   to the other template.  */

static tree
get_underlying_template (tree tmpl)
{
  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
  while (DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* Determine if the alias is equivalent to an underlying
         template.  */
      tree orig_type = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
      tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (orig_type);
      if (!tinfo)
        break;

      tree underlying = TI_TEMPLATE (tinfo);
      /* The underlying template must be a primary template with the same
         number of innermost parameters as the alias.  */
      if (!PRIMARY_TEMPLATE_P (underlying)
          || (num_innermost_template_parms (tmpl)
              != num_innermost_template_parms (underlying)))
        break;

      /* The alias must pass its own parameters straight through.  */
      tree alias_args = INNERMOST_TEMPLATE_ARGS
        (template_parms_to_args (DECL_TEMPLATE_PARMS (tmpl)));
      if (!comp_template_args (TI_ARGS (tinfo), alias_args))
        break;

      /* Alias is equivalent.  Strip it and repeat.  */
      tmpl = underlying;
    }

  return tmpl;
}

/* Subroutine of convert_nontype_argument. Converts EXPR to TYPE, which
   must be a reference-to-function or a pointer-to-function type, as
   specified in [temp.arg.nontype]: disambiguate EXPR if it is an
   overload set, and check that the resulting function has external
   linkage.  */

static tree
convert_nontype_argument_function (tree type, tree expr,
                                   tsubst_flags_t complain)
{
  tree fns = expr;
  tree fn, fn_no_ptr;
  linkage_kind linkage;

  fn = instantiate_type (type, fns, tf_none);
  if (fn == error_mark_node)
    return error_mark_node;

  /* A value-dependent argument is accepted as-is; the checks below are
     deferred until instantiation.  */
  if (value_dependent_expression_p (fn))
    goto accept;

  /* Strip conversions and address-taking to reach the FUNCTION_DECL.  */
  fn_no_ptr = strip_fnptr_conv (fn);
  if (TREE_CODE (fn_no_ptr) == ADDR_EXPR)
    fn_no_ptr = TREE_OPERAND (fn_no_ptr, 0);
  if (BASELINK_P (fn_no_ptr))
    fn_no_ptr = BASELINK_FUNCTIONS (fn_no_ptr);

  /* [temp.arg.nontype]/1

     A template-argument for a non-type, non-template template-parameter
     shall be one of:
     [...]

     -- the address of an object or function with external [C++11: or
        internal] linkage.  */
  if (TREE_CODE (fn_no_ptr) != FUNCTION_DECL)
    {
      if (complain & tf_error)
        {
          error ("%qE is not a valid template argument for type %qT",
                 expr, type);
          if (TYPE_PTR_P (type))
            inform (input_location, "it must be the address of a function "
                    "with external linkage");
          else
            inform (input_location, "it must be the name of a function with "
                    "external linkage");
        }
      return NULL_TREE;
    }

  linkage = decl_linkage (fn_no_ptr);
  /* C++11 additionally admits internal linkage; older dialects require
     external linkage.  */
  if (cxx_dialect >= cxx11 ? linkage == lk_none : linkage != lk_external)
    {
      if (complain & tf_error)
        {
          if (cxx_dialect >= cxx11)
            error ("%qE is not a valid template argument for type %qT "
                   "because %qD has no linkage", expr, type, fn_no_ptr);
          else
            error ("%qE is not a valid template argument for type %qT "
                   "because %qD does not have external linkage",
                   expr, type, fn_no_ptr);
        }
      return NULL_TREE;
    }

 accept:
  if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      if (REFERENCE_REF_P (fn))
        fn = TREE_OPERAND (fn, 0);
      else
        fn = build_address (fn);
    }
  if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (fn)))
    fn = build_nop (type, fn);

  return fn;
}

/* Subroutine of convert_nontype_argument.
   Check if EXPR of type TYPE is a valid pointer-to-member constant.
   Emit an error otherwise.
*/

static bool
check_valid_ptrmem_cst_expr (tree type, tree expr,
                             tsubst_flags_t complain)
{
  location_t loc = EXPR_LOC_OR_LOC (expr, input_location);
  tree orig_expr = expr;
  STRIP_NOPS (expr);

  /* EXPR is acceptable when it is a null pointer constant, a PTRMEM_CST
     for the matching class, a C++11 null member pointer value, or (inside
     a template) a still-unresolved &X::Y expression.  */
  bool valid = false;
  if (null_ptr_cst_p (expr))
    valid = true;
  else if (TREE_CODE (expr) == PTRMEM_CST
           && same_type_p (TYPE_PTRMEM_CLASS_TYPE (type),
                           PTRMEM_CST_CLASS (expr)))
    valid = true;
  else if (cxx_dialect >= cxx11 && null_member_pointer_value_p (expr))
    valid = true;
  else if (processing_template_decl
           && TREE_CODE (expr) == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (expr, 0)) == OFFSET_REF)
    valid = true;

  if (valid)
    return true;

  if (complain & tf_error)
    {
      error_at (loc, "%qE is not a valid template argument for type %qT",
                orig_expr, type);
      if (TREE_CODE (expr) != PTRMEM_CST)
        inform (loc, "it must be a pointer-to-member of the form %<&X::Y%>");
      else
        inform (loc, "because it is a member of %qT", PTRMEM_CST_CLASS (expr));
    }
  return false;
}

/* Returns TRUE iff the address of OP is value-dependent.

   14.6.2.4 [temp.dep.temp]:
   A non-integral non-type template-argument is dependent if its type is
   dependent or it has either of the following forms
     qualified-id
     & qualified-id
   and contains a nested-name-specifier which specifies a class-name that
   names a dependent type.

   We generalize this to just say that the address of a member of a
   dependent class is value-dependent; the above doesn't cover the
   address of a static data member named with an unqualified-id.  */

static bool
has_value_dependent_address (tree op)
{
  /* We could use get_inner_reference here, but there's no need;
     this is only relevant for template non-type arguments, which
     can only be expressed as &id-expression.  */
  if (!DECL_P (op))
    return false;

  tree scope = CP_DECL_CONTEXT (op);
  return TYPE_P (scope) && dependent_type_p (scope);
}

/* The next set of functions are used for providing helpful explanatory
   diagnostics for failed overload resolution.
Their messages should be indented by two spaces for consistency with the
   messages in call.c  */

/* Deduction succeeded; report "no failure".  */

static int
unify_success (bool /*explain_p*/)
{
  return 0;
}

/* Other failure functions should call this one, to provide a single function
   for setting a breakpoint on.  */

static int
unify_invalid (bool /*explain_p*/)
{
  return 1;
}

static int
unify_parameter_deduction_failure (bool explain_p, tree parm)
{
  if (explain_p)
    inform (input_location,
            " couldn't deduce template parameter %qD", parm);
  return unify_invalid (explain_p);
}

static int
unify_cv_qual_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
            " types %qT and %qT have incompatible cv-qualifiers",
            parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_type_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location, " mismatched types %qT and %qT", parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_parameter_pack_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
            " template parameter %qD is not a parameter pack, but "
            "argument %qD is",
            parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_ptrmem_cst_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
            " template argument %qE does not match "
            "pointer-to-member constant %qE",
            arg, parm);
  return unify_invalid (explain_p);
}

static int
unify_expression_unequal (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location, " %qE is not equivalent to %qE", parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_parameter_pack_inconsistent (bool explain_p, tree old_arg, tree new_arg)
{
  if (explain_p)
    inform (input_location,
            " inconsistent parameter pack deduction with %qT and %qT",
            old_arg, new_arg);
  return unify_invalid (explain_p);
}

/* Report that PARM was deduced to two conflicting values FIRST and
   SECOND (types or non-type values).  */

static int
unify_inconsistency (bool explain_p, tree parm, tree first, tree second)
{
  if (explain_p)
    {
      if (TYPE_P (parm))
        inform (input_location,
" deduced conflicting types for parameter %qT (%qT and %qT)", parm, first, second); else inform (input_location, " deduced conflicting values for non-type parameter " "%qE (%qE and %qE)", parm, first, second); } return unify_invalid (explain_p); } static int unify_vla_arg (bool explain_p, tree arg) { if (explain_p) inform (input_location, " variable-sized array type %qT is not " "a valid template argument", arg); return unify_invalid (explain_p); } static int unify_method_type_error (bool explain_p, tree arg) { if (explain_p) inform (input_location, " member function type %qT is not a valid template argument", arg); return unify_invalid (explain_p); } static int unify_arity (bool explain_p, int have, int wanted, bool least_p = false) { if (explain_p) { if (least_p) inform_n (input_location, wanted, " candidate expects at least %d argument, %d provided", " candidate expects at least %d arguments, %d provided", wanted, have); else inform_n (input_location, wanted, " candidate expects %d argument, %d provided", " candidate expects %d arguments, %d provided", wanted, have); } return unify_invalid (explain_p); } static int unify_too_many_arguments (bool explain_p, int have, int wanted) { return unify_arity (explain_p, have, wanted); } static int unify_too_few_arguments (bool explain_p, int have, int wanted, bool least_p = false) { return unify_arity (explain_p, have, wanted, least_p); } static int unify_arg_conversion (bool explain_p, tree to_type, tree from_type, tree arg) { if (explain_p) inform (EXPR_LOC_OR_LOC (arg, input_location), " cannot convert %qE (type %qT) to type %qT", arg, from_type, to_type); return unify_invalid (explain_p); } static int unify_no_common_base (bool explain_p, enum template_base_result r, tree parm, tree arg) { if (explain_p) switch (r) { case tbr_ambiguous_baseclass: inform (input_location, " %qT is an ambiguous base class of %qT", parm, arg); break; default: inform (input_location, " %qT is not derived from %qT", arg, parm); break; } 
return unify_invalid (explain_p);
}

static int
unify_inconsistent_template_template_parameters (bool explain_p)
{
  if (explain_p)
    inform (input_location,
            " template parameters of a template template argument are "
            "inconsistent with other deduced template arguments");
  return unify_invalid (explain_p);
}

static int
unify_template_deduction_failure (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
            " can't deduce a template for %qT from non-template type %qT",
            parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_template_argument_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
            " template argument %qE does not match %qE", arg, parm);
  return unify_invalid (explain_p);
}

/* Attempt to convert the non-type template parameter EXPR to the
   indicated TYPE.  If the conversion is successful, return the
   converted value.  If the conversion is unsuccessful, return
   NULL_TREE if we issued an error message, or error_mark_node if we
   did not.  We issue error messages for out-and-out bad template
   parameters, but not simply because the conversion failed, since we
   might be just trying to do argument deduction.  Both TYPE and EXPR
   must be non-dependent.

   The conversion follows the special rules described in
   [temp.arg.nontype], and it is much more strict than an implicit
   conversion.

   This function is called twice for each template argument (see
   lookup_template_class for a more accurate description of this
   problem).  This means that we need to handle expressions which
   are not valid in a C++ source, but can be created from the first
   call (for instance, casts to perform conversions).  These hacks
   can go away after we fix the double coercion problem.  */

static tree
convert_nontype_argument (tree type, tree expr, tsubst_flags_t complain)
{
  tree expr_type;
  location_t loc = EXPR_LOC_OR_LOC (expr, input_location);
  /* Preserve the operand as written, for use in diagnostics below.  */
  tree orig_expr = expr;

  /* Detect immediately string literals as invalid non-type argument.
This special-case is not needed for correctness (we would easily catch
     this later), but only to provide better diagnostic for this common
     user mistake.  As suggested by DR 100, we do not mention linkage issues
     in the diagnostic as this is not the point.  */
  /* FIXME we're making this OK.  */
  if (TREE_CODE (expr) == STRING_CST)
    {
      if (complain & tf_error)
        error ("%qE is not a valid template argument for type %qT "
               "because string literals can never be used in this context",
               expr, type);
      return NULL_TREE;
    }

  /* Add the ADDR_EXPR now for the benefit of
     value_dependent_expression_p.  */
  if (TYPE_PTROBV_P (type)
      && TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE)
    {
      expr = decay_conversion (expr, complain);
      if (expr == error_mark_node)
        return error_mark_node;
    }

  /* If we are in a template, EXPR may be non-dependent, but still
     have a syntactic, rather than semantic, form.  For example, EXPR
     might be a SCOPE_REF, rather than the VAR_DECL to which the
     SCOPE_REF refers.  Preserving the qualifying scope is necessary
     so that access checking can be performed when the template is
     instantiated -- but here we need the resolved form so that we can
     convert the argument.  */
  bool non_dep = false;
  if (TYPE_REF_OBJ_P (type)
      && has_value_dependent_address (expr))
    /* If we want the address and it's value-dependent, don't fold.  */;
  else if (processing_template_decl
           && is_nondependent_constant_expression (expr))
    non_dep = true;
  if (error_operand_p (expr))
    return error_mark_node;
  expr_type = TREE_TYPE (expr);

  /* If the argument is non-dependent, perform any conversions in
     non-dependent context as well.  */
  processing_template_decl_sentinel s (non_dep);
  if (non_dep)
    expr = instantiate_non_dependent_expr_internal (expr, complain);

  if (value_dependent_expression_p (expr))
    expr = canonicalize_expr_argument (expr, complain);

  /* 14.3.2/5: The null pointer{,-to-member} conversion is applied
     to a non-type argument of "nullptr".  */
  if (NULLPTR_TYPE_P (expr_type) && TYPE_PTR_OR_PTRMEM_P (type))
    expr = fold_simple (convert (type, expr));

  /* In C++11, integral or enumeration non-type template arguments can be
     arbitrary constant expressions.  Pointer and pointer to member
     arguments can be general constant expressions that evaluate to a null
     value, but otherwise still need to be of a specific form.  */
  if (cxx_dialect >= cxx11)
    {
      if (TREE_CODE (expr) == PTRMEM_CST)
        /* A PTRMEM_CST is already constant, and a valid template
           argument for a parameter of pointer to member type, we just want
           to leave it in that form rather than lower it to a
           CONSTRUCTOR.  */;
      else if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)
               || cxx_dialect >= cxx17)
        {
          /* C++17: A template-argument for a non-type template-parameter
             shall be a converted constant expression (8.20) of the type of
             the template-parameter.  */
          expr = build_converted_constant_expr (type, expr, complain);
          if (expr == error_mark_node)
            return error_mark_node;
          expr = maybe_constant_value (expr);
          expr = convert_from_reference (expr);
        }
      else if (TYPE_PTR_OR_PTRMEM_P (type))
        {
          tree folded = maybe_constant_value (expr);
          if (TYPE_PTR_P (type) ? integer_zerop (folded)
              : null_member_pointer_value_p (folded))
            expr = folded;
        }
    }

  /* Mark the use now that conversions are done: lvalue for reference
     parameters, rvalue otherwise.  */
  if (TREE_CODE (type) == REFERENCE_TYPE)
    expr = mark_lvalue_use (expr);
  else
    expr = mark_rvalue_use (expr);

  /* HACK: Due to double coercion, we can get a
     NOP_EXPR<REFERENCE_TYPE>(ADDR_EXPR<POINTER_TYPE> (arg)) here,
     which is the tree that we built on the first call (see below when
     coercing to reference to object or to reference to function).  We
     just strip everything and get to the arg.  See
     g++.old-deja/g++.oliva/template4.C and g++.dg/template/nontype9.C
     for examples.  */
  if (TYPE_REF_OBJ_P (type) || TYPE_REFFN_P (type))
    {
      tree probe_type, probe = expr;
      if (REFERENCE_REF_P (probe))
        probe = TREE_OPERAND (probe, 0);
      probe_type = TREE_TYPE (probe);
      if (TREE_CODE (probe) == NOP_EXPR)
        {
          /* ???
Maybe we could use convert_from_reference here, but we would need
             to relax its constraints because the NOP_EXPR could actually
             change the type to something more cv-qualified, and this is
             not folded by convert_from_reference.  */
          tree addr = TREE_OPERAND (probe, 0);
          if (TREE_CODE (probe_type) == REFERENCE_TYPE
              && TREE_CODE (addr) == ADDR_EXPR
              && TYPE_PTR_P (TREE_TYPE (addr))
              && (same_type_ignoring_top_level_qualifiers_p
                  (TREE_TYPE (probe_type),
                   TREE_TYPE (TREE_TYPE (addr)))))
            {
              expr = TREE_OPERAND (addr, 0);
              expr_type = TREE_TYPE (probe_type);
            }
        }
    }

  /* [temp.arg.nontype]/5, bullet 1

     For a non-type template-parameter of integral or enumeration type,
     integral promotions (_conv.prom_) and integral conversions
     (_conv.integral_) are applied.  */
  if (INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    {
      /* Pre-C++11, attempt the conversion here and keep the result only
         when it succeeded.  */
      if (cxx_dialect < cxx11)
        {
          tree t = build_converted_constant_expr (type, expr, complain);
          t = maybe_constant_value (t);
          if (t != error_mark_node)
            expr = t;
        }

      if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (expr)))
        return error_mark_node;

      /* Notice that there are constant expressions like '4 % 0' which
         do not fold into integer constants.  */
      if (TREE_CODE (expr) != INTEGER_CST
          && !value_dependent_expression_p (expr))
        {
          if (complain & tf_error)
            {
              int errs = errorcount, warns = warningcount + werrorcount;
              if (!require_potential_constant_expression (expr))
                expr = error_mark_node;
              else
                expr = cxx_constant_value (expr);
              if (errorcount > errs || warningcount + werrorcount > warns)
                inform (loc, "in template argument for type %qT", type);
              if (expr == error_mark_node)
                return NULL_TREE;
              /* else cxx_constant_value complained but gave us
                 a real constant, so go ahead.  */
              if (TREE_CODE (expr) != INTEGER_CST)
                {
                  /* Some assemble time constant expressions like
                     (intptr_t)&&lab1 - (intptr_t)&&lab2 or
                     4 + (intptr_t)&&var satisfy reduced_constant_expression_p
                     as we can emit them into .rodata initializers of
                     variables, yet they can't fold into an INTEGER_CST at
                     compile time.  Refuse them here.  */
                  gcc_checking_assert (reduced_constant_expression_p (expr));
                  error_at (loc, "template argument %qE for type %qT not "
                            "a constant integer", expr, type);
                  return NULL_TREE;
                }
            }
          else
            return NULL_TREE;
        }

      /* Avoid typedef problems.  */
      if (TREE_TYPE (expr) != type)
        expr = fold_convert (type, expr);
    }
  /* [temp.arg.nontype]/5, bullet 2

     For a non-type template-parameter of type pointer to object,
     qualification conversions (_conv.qual_) and the array-to-pointer
     conversion (_conv.array_) are applied.  */
  else if (TYPE_PTROBV_P (type))
    {
      tree decayed = expr;

      /* Look through any NOP_EXPRs around an ADDR_EXPR, whether they come
         from decay_conversion or an explicit cast.  If it's a problematic
         cast, we'll complain about it below.  */
      if (TREE_CODE (expr) == NOP_EXPR)
        {
          tree probe = expr;
          STRIP_NOPS (probe);
          if (TREE_CODE (probe) == ADDR_EXPR
              && TYPE_PTR_P (TREE_TYPE (probe)))
            {
              expr = probe;
              expr_type = TREE_TYPE (expr);
            }
        }

      /* [temp.arg.nontype]/1  (TC1 version, DR 49):

         A template-argument for a non-type, non-template template-parameter
         shall be one of:
         [...]

         -- the name of a non-type template-parameter;
         -- the address of an object or function with external linkage, [...]
            expressed as "& id-expression" where the & is optional if the
            name refers to a function or array, or if the corresponding
            template-parameter is a reference.

         Here, we do not care about functions, as they are invalid anyway
         for a parameter of type pointer-to-object.  */

      if (value_dependent_expression_p (expr))
        /* Non-type template parameters are OK.  */
        ;
      else if (cxx_dialect >= cxx11 && integer_zerop (expr))
        /* Null pointer values are OK in C++11.
*/;
      else if (TREE_CODE (expr) != ADDR_EXPR)
        {
          if (VAR_P (expr))
            {
              if (complain & tf_error)
                error ("%qD is not a valid template argument "
                       "because %qD is a variable, not the address of "
                       "a variable", orig_expr, expr);
              return NULL_TREE;
            }
          if (POINTER_TYPE_P (expr_type))
            {
              if (complain & tf_error)
                error ("%qE is not a valid template argument for %qT "
                       "because it is not the address of a variable",
                       orig_expr, type);
              return NULL_TREE;
            }
          /* Other values, like integer constants, might be valid
             non-type arguments of some other type.  */
          return error_mark_node;
        }
      else
        {
          /* EXPR is "&decl"; validate the variable whose address is
             being taken.  */
          tree decl = TREE_OPERAND (expr, 0);

          if (!VAR_P (decl))
            {
              if (complain & tf_error)
                error ("%qE is not a valid template argument of type %qT "
                       "because %qE is not a variable", orig_expr, type, decl);
              return NULL_TREE;
            }
          else if (cxx_dialect < cxx11 && !DECL_EXTERNAL_LINKAGE_P (decl))
            {
              if (complain & tf_error)
                error ("%qE is not a valid template argument of type %qT "
                       "because %qD does not have external linkage",
                       orig_expr, type, decl);
              return NULL_TREE;
            }
          else if ((cxx_dialect >= cxx11 && cxx_dialect < cxx17)
                   && decl_linkage (decl) == lk_none)
            {
              if (complain & tf_error)
                error ("%qE is not a valid template argument of type %qT "
                       "because %qD has no linkage", orig_expr, type, decl);
              return NULL_TREE;
            }
          /* C++17: For a non-type template-parameter of reference or pointer
             type, the value of the constant expression shall not refer to
             (or for a pointer type, shall not be the address of):
             * a subobject (4.5),
             * a temporary object (15.2),
             * a string literal (5.13.5),
             * the result of a typeid expression (8.2.8), or
             * a predefined __func__ variable (11.4.1).  */
          else if (DECL_ARTIFICIAL (decl))
            {
              if (complain & tf_error)
                error ("the address of %qD is not a valid template argument",
                       decl);
              return NULL_TREE;
            }
          else if (!same_type_ignoring_top_level_qualifiers_p
                   (strip_array_types (TREE_TYPE (type)),
                    strip_array_types (TREE_TYPE (decl))))
            {
              if (complain & tf_error)
                error ("the address of the %qT subobject of %qD is not a "
                       "valid template argument", TREE_TYPE (type), decl);
              return NULL_TREE;
            }
          else if (!TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
            {
              if (complain & tf_error)
                error ("the address of %qD is not a valid template argument "
                       "because it does not have static storage duration",
                       decl);
              return NULL_TREE;
            }
        }

      /* Use the (possibly decayed) form computed above.  */
      expr = decayed;

      expr = perform_qualification_conversions (type, expr);
      if (expr == error_mark_node)
        return error_mark_node;
    }
  /* [temp.arg.nontype]/5, bullet 3

     For a non-type template-parameter of type reference to object, no
     conversions apply.  The type referred to by the reference may be more
     cv-qualified than the (otherwise identical) type of the
     template-argument.  The template-parameter is bound directly to the
     template-argument, which must be an lvalue.  */
  else if (TYPE_REF_OBJ_P (type))
    {
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (type),
                                                      expr_type))
        return error_mark_node;

      if (!at_least_as_qualified_p (TREE_TYPE (type), expr_type))
        {
          if (complain & tf_error)
            error ("%qE is not a valid template argument for type %qT "
                   "because of conflicts in cv-qualification", expr, type);
          return NULL_TREE;
        }

      if (!lvalue_p (expr))
        {
          if (complain & tf_error)
            error ("%qE is not a valid template argument for type %qT "
                   "because it is not an lvalue", expr, type);
          return NULL_TREE;
        }

      /* [temp.arg.nontype]/1

         A template-argument for a non-type, non-template template-parameter
         shall be one of: [...]

         -- the address of an object or function with external linkage.
*/
      /* A reference variable seen through an implicit dereference cannot
         serve here: its own address is not a constant.  */
      if (INDIRECT_REF_P (expr)
          && TYPE_REF_OBJ_P (TREE_TYPE (TREE_OPERAND (expr, 0))))
        {
          expr = TREE_OPERAND (expr, 0);
          if (DECL_P (expr))
            {
              if (complain & tf_error)
                error ("%q#D is not a valid template argument for type %qT "
                       "because a reference variable does not have a constant "
                       "address", expr, type);
              return NULL_TREE;
            }
        }

      if (TYPE_REF_OBJ_P (TREE_TYPE (expr))
          && value_dependent_expression_p (expr))
        /* OK, dependent reference.  We don't want to ask whether a DECL is
           itself value-dependent, since what we want here is its address.  */;
      else
        {
          if (!DECL_P (expr))
            {
              if (complain & tf_error)
                error ("%qE is not a valid template argument for type %qT "
                       "because it is not an object with linkage",
                       expr, type);
              return NULL_TREE;
            }

          /* DR 1155 allows internal linkage in C++11 and up.  */
          linkage_kind linkage = decl_linkage (expr);
          if (linkage < (cxx_dialect >= cxx11 ? lk_internal : lk_external))
            {
              if (complain & tf_error)
                error ("%qE is not a valid template argument for type %qT "
                       "because object %qD does not have linkage",
                       expr, type, expr);
              return NULL_TREE;
            }

          expr = build_address (expr);
        }

      if (!same_type_p (type, TREE_TYPE (expr)))
        expr = build_nop (type, expr);
    }
  /* [temp.arg.nontype]/5, bullet 4

     For a non-type template-parameter of type pointer to function, only
     the function-to-pointer conversion (_conv.func_) is applied.  If the
     template-argument represents a set of overloaded functions (or a
     pointer to such), the matching function is selected from the set
     (_over.over_).  */
  else if (TYPE_PTRFN_P (type))
    {
      /* If the argument is a template-id, we might not have enough
         context information to decay the pointer.  */
      if (!type_unknown_p (expr_type))
        {
          expr = decay_conversion (expr, complain);
          if (expr == error_mark_node)
            return error_mark_node;
        }

      if (cxx_dialect >= cxx11 && integer_zerop (expr))
        /* Null pointer values are OK in C++11.  */
        return perform_qualification_conversions (type, expr);

      expr = convert_nontype_argument_function (type, expr, complain);
      if (!expr || expr == error_mark_node)
        return expr;
    }
  /* [temp.arg.nontype]/5, bullet 5

     For a non-type template-parameter of type reference to function, no
     conversions apply.  If the template-argument represents a set of
     overloaded functions, the matching function is selected from the set
     (_over.over_).  */
  else if (TYPE_REFFN_P (type))
    {
      if (TREE_CODE (expr) == ADDR_EXPR)
        {
          if (complain & tf_error)
            {
              error ("%qE is not a valid template argument for type %qT "
                     "because it is a pointer", expr, type);
              inform (input_location, "try using %qE instead",
                      TREE_OPERAND (expr, 0));
            }
          return NULL_TREE;
        }

      expr = convert_nontype_argument_function (type, expr, complain);
      if (!expr || expr == error_mark_node)
        return expr;
    }
  /* [temp.arg.nontype]/5, bullet 6

     For a non-type template-parameter of type pointer to member function,
     no conversions apply.  If the template-argument represents a set of
     overloaded member functions, the matching member function is selected
     from the set (_over.over_).  */
  else if (TYPE_PTRMEMFUNC_P (type))
    {
      expr = instantiate_type (type, expr, tf_none);
      if (expr == error_mark_node)
        return error_mark_node;

      /* [temp.arg.nontype] bullet 1 says the pointer to member
         expression must be a pointer-to-member constant.  */
      if (!value_dependent_expression_p (expr)
          && !check_valid_ptrmem_cst_expr (type, expr, complain))
        return NULL_TREE;

      /* Repeated conversion can't deal with a conversion that turns
         PTRMEM_CST into a CONSTRUCTOR, so build up a new PTRMEM_CST
         instead.  */
      if (fnptr_conv_p (type, TREE_TYPE (expr)))
        expr = make_ptrmem_cst (type, PTRMEM_CST_MEMBER (expr));
    }
  /* [temp.arg.nontype]/5, bullet 7

     For a non-type template-parameter of type pointer to data member,
     qualification conversions (_conv.qual_) are applied.
*/
  else if (TYPE_PTRDATAMEM_P (type))
    {
      /* [temp.arg.nontype] bullet 1 says the pointer to member
         expression must be a pointer-to-member constant.  */
      if (!value_dependent_expression_p (expr)
          && !check_valid_ptrmem_cst_expr (type, expr, complain))
        return NULL_TREE;

      expr = perform_qualification_conversions (type, expr);
      if (expr == error_mark_node)
        return expr;
    }
  else if (NULLPTR_TYPE_P (type))
    {
      if (!NULLPTR_TYPE_P (TREE_TYPE (expr)))
        {
          if (complain & tf_error)
            error ("%qE is not a valid template argument for type %qT "
                   "because it is of type %qT", expr, type,
                   TREE_TYPE (expr));
          return NULL_TREE;
        }
      return expr;
    }
  /* A template non-type parameter must be one of the above.  */
  else
    gcc_unreachable ();

  /* Sanity check: did we actually convert the argument to the
     right type?  */
  gcc_assert (same_type_ignoring_top_level_qualifiers_p
              (type, TREE_TYPE (expr)));
  return convert_from_reference (expr);
}

/* Subroutine of coerce_template_template_parms, which returns 1 if
   PARM_PARM and ARG_PARM match using the rule for the template
   parameters of template template parameters.  Both PARM and ARG are
   template parameters; the rest of the arguments are the same as for
   coerce_template_template_parms.  */

static int
coerce_template_template_parm (tree parm,
                               tree arg,
                               tsubst_flags_t complain,
                               tree in_decl,
                               tree outer_args)
{
  if (arg == NULL_TREE || error_operand_p (arg)
      || parm == NULL_TREE || error_operand_p (parm))
    return 0;

  /* PARM and ARG must be the same kind of template parameter.  */
  if (TREE_CODE (arg) != TREE_CODE (parm))
    return 0;

  switch (TREE_CODE (parm))
    {
    case TEMPLATE_DECL:
      /* We encounter instantiations of templates like
           template <template <template <class> class> class TT>
           class C;  */
      {
        tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
        tree argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

        if (!coerce_template_template_parms
            (parmparm, argparm, complain, in_decl, outer_args))
          return 0;
      }
      /* Fall through.
*/
    case TYPE_DECL:
      if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (arg))
          && !TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
        /* Argument is a parameter pack but parameter is not.  */
        return 0;
      break;

    case PARM_DECL:
      /* The tsubst call is used to handle cases such as

           template <int> class C {};
           template <class T, template <T> class TT> class D {};
           D<int, C> d;

         i.e. the parameter list of TT depends on earlier parameters.  */
      if (!uses_template_parms (TREE_TYPE (arg)))
        {
          tree t = tsubst (TREE_TYPE (parm), outer_args, complain, in_decl);
          if (!uses_template_parms (t)
              && !same_type_p (t, TREE_TYPE (arg)))
            return 0;
        }

      if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (arg))
          && !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
        /* Argument is a parameter pack but parameter is not.  */
        return 0;
      break;

    default:
      gcc_unreachable ();
    }

  /* All checks passed: the parameters match.  */
  return 1;
}

/* Coerce template argument list ARGLIST for use with template
   template-parameter TEMPL.  */

static tree
coerce_template_args_for_ttp (tree templ, tree arglist,
                              tsubst_flags_t complain)
{
  /* Consider an example where a template template parameter declared as

     template <class T, class U = std::allocator<T> > class TT

     The template parameter level of T and U are one level larger than
     of TT.  To proper process the default argument of U, say when an
     instantiation `TT<int>' is seen, we need to build the full
     arguments containing {int} as the innermost level.  Outer levels,
     available when not appearing as default template argument, can be
     obtained from the arguments of the enclosing template.

     Suppose that TT is later substituted with std::vector.  The above
     instantiation is `TT<int, std::allocator<T> >' with TT at
     level 1, and T at level 2, while the template arguments at level 1
     becomes {std::vector} and the inner level 2 is {int}.  */

  tree outer = DECL_CONTEXT (templ);
  if (outer)
    {
      if (DECL_TEMPLATE_SPECIALIZATION (outer))
        /* We want arguments for the partial specialization, not arguments for
           the primary template.
*/
	outer = template_parms_to_args (DECL_TEMPLATE_PARMS (outer));
      else
	outer = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (outer)));
    }
  else if (current_template_parms)
    {
      /* This is an argument of the current template, so we haven't set
	 DECL_CONTEXT yet.  */
      tree relevant_template_parms;

      /* Parameter levels that are greater than the level of the given
	 template template parm are irrelevant.  */
      relevant_template_parms = current_template_parms;
      while (TMPL_PARMS_DEPTH (relevant_template_parms)
	     != TEMPLATE_TYPE_LEVEL (TREE_TYPE (templ)))
	relevant_template_parms = TREE_CHAIN (relevant_template_parms);

      outer = template_parms_to_args (relevant_template_parms);
    }

  if (outer)
    arglist = add_to_template_args (outer, arglist);

  tree parmlist = DECL_INNERMOST_TEMPLATE_PARMS (templ);
  return coerce_template_parms (parmlist, arglist, templ, complain,
				/*require_all_args=*/true,
				/*use_default_args=*/true);
}

/* A cache of template template parameters with match-all default
   arguments.  Maps the original parameter to its defaulted copy so
   add_defaults_to_ttp builds each copy only once; GTY((deletable))
   lets the GC discard the whole map.  */
static GTY((deletable)) hash_map<tree,tree> *defaulted_ttp_cache;

/* Record in the cache that T is the defaulted copy of V.  */

static void
store_defaulted_ttp (tree v, tree t)
{
  if (!defaulted_ttp_cache)
    defaulted_ttp_cache = hash_map<tree,tree>::create_ggc (13);
  defaulted_ttp_cache->put (v, t);
}

/* Return the previously stored defaulted copy of V, or NULL_TREE.  */

static tree
lookup_defaulted_ttp (tree v)
{
  if (defaulted_ttp_cache)
    if (tree *p = defaulted_ttp_cache->get (v))
      return *p;
  return NULL_TREE;
}

/* T is a bound template template-parameter.  Copy its arguments into default
   arguments of the template template-parameter's template parameters.  */

static tree
add_defaults_to_ttp (tree otmpl)
{
  /* Reuse a copy built earlier, if any.  */
  if (tree c = lookup_defaulted_ttp (otmpl))
    return c;

  tree ntmpl = copy_node (otmpl);

  /* Give the copy its own type node, detached from the original's
     variant/pointer chains.  */
  tree ntype = copy_node (TREE_TYPE (otmpl));
  TYPE_STUB_DECL (ntype) = TYPE_NAME (ntype) = ntmpl;
  TYPE_MAIN_VARIANT (ntype) = ntype;
  TYPE_POINTER_TO (ntype) = TYPE_REFERENCE_TO (ntype) = NULL_TREE;
  TYPE_NAME (ntype) = ntmpl;
  SET_TYPE_STRUCTURAL_EQUALITY (ntype);

  tree idx = TEMPLATE_TYPE_PARM_INDEX (ntype)
    = copy_node (TEMPLATE_TYPE_PARM_INDEX (ntype));
  TEMPLATE_PARM_DECL (idx) = ntmpl;
  TREE_TYPE (ntmpl) = TREE_TYPE (idx) = ntype;

  /* Copy the innermost parameter vector and give every non-pack
     parameter the match-all default argument any_targ_node.  */
  tree oparms = DECL_TEMPLATE_PARMS (otmpl);
  tree parms = DECL_TEMPLATE_PARMS (ntmpl) = copy_node (oparms);
  TREE_CHAIN (parms) = TREE_CHAIN (oparms);
  tree vec = TREE_VALUE (parms) = copy_node (TREE_VALUE (parms));
  for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i)
    {
      tree o = TREE_VEC_ELT (vec, i);
      if (!template_parameter_pack_p (TREE_VALUE (o)))
	{
	  tree n = TREE_VEC_ELT (vec, i) = copy_node (o);
	  TREE_PURPOSE (n) = any_targ_node;
	}
    }

  store_defaulted_ttp (otmpl, ntmpl);
  return ntmpl;
}

/* ARG is a bound potential template template-argument, and PARGS is a
   list of arguments for the corresponding template template-parameter.
   Adjust PARGS as appropriate for application to ARG's template, and
   if ARG is a BOUND_TEMPLATE_TEMPLATE_PARM, possibly adjust it to add
   default template arguments to the template template parameter.
   Note that ARG is passed by reference and may be rebound.  */

static tree
coerce_ttp_args_for_tta (tree& arg, tree pargs, tsubst_flags_t complain)
{
  ++processing_template_decl;
  tree arg_tmpl = TYPE_TI_TEMPLATE (arg);
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (arg_tmpl))
    {
      /* When comparing two template template-parameters in partial ordering,
	 rewrite the one currently being used as an argument to have default
	 arguments for all parameters.
*/
      arg_tmpl = add_defaults_to_ttp (arg_tmpl);
      pargs = coerce_template_args_for_ttp (arg_tmpl, pargs, complain);
      if (pargs != error_mark_node)
	arg = bind_template_template_parm (TREE_TYPE (arg_tmpl),
					   TYPE_TI_ARGS (arg));
    }
  else
    {
      tree aparms
	= INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (arg_tmpl));
      pargs = coerce_template_parms (aparms, pargs, arg_tmpl, complain,
				     /*require_all*/true,
				     /*use_default*/true);
    }
  --processing_template_decl;
  return pargs;
}

/* Subroutine of unify for the case when PARM is a
   BOUND_TEMPLATE_TEMPLATE_PARM.  Follows the unify return convention:
   zero on success, nonzero on failure.  */

static int
unify_bound_ttp_args (tree tparms, tree targs, tree parm, tree& arg,
		      bool explain_p)
{
  tree parmvec = TYPE_TI_ARGS (parm);
  tree argvec = INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (arg));

  /* The template template parm might be variadic and the argument
     not, so flatten both argument lists.  */
  parmvec = expand_template_argument_pack (parmvec);
  argvec = expand_template_argument_pack (argvec);

  if (flag_new_ttp)
    {
      /* In keeping with P0522R0, adjust P's template arguments
	 to apply to A's template; then flatten it again.  */
      tree nparmvec = parmvec;
      nparmvec = coerce_ttp_args_for_tta (arg, parmvec, tf_none);
      nparmvec = expand_template_argument_pack (nparmvec);

      if (unify (tparms, targs, nparmvec, argvec, UNIFY_ALLOW_NONE,
		 explain_p))
	return 1;

      /* If the P0522 adjustment eliminated a pack expansion, deduce
	 empty packs.  */
      if (flag_new_ttp
	  && TREE_VEC_LENGTH (nparmvec) < TREE_VEC_LENGTH (parmvec)
	  && unify_pack_expansion (tparms, targs, parmvec, argvec,
				   DEDUCE_EXACT, /*sub*/true, explain_p))
	return 1;
    }
  else
    {
      /* Deduce arguments T, i from TT<T> or TT<i>.
	 We check each element of PARMVEC and ARGVEC individually
	 rather than the whole TREE_VEC since they can have different
	 number of elements, which is allowed under N2555.  */

      int len = TREE_VEC_LENGTH (parmvec);

      /* Check if the parameters end in a pack, making them variadic.  */
      int parm_variadic_p = 0;
      if (len > 0
	  && PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, len - 1)))
	parm_variadic_p = 1;

      for (int i = 0; i < len - parm_variadic_p; ++i)
	/* If the template argument list of P contains a pack expansion that
	   is not the last template argument, the entire template argument
	   list is a non-deduced context.  */
	if (PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, i)))
	  return unify_success (explain_p);

      if (TREE_VEC_LENGTH (argvec) < len - parm_variadic_p)
	return unify_too_few_arguments (explain_p,
					TREE_VEC_LENGTH (argvec), len);

      /* Unify the fixed (non-pack) leading parameters pairwise.  */
      for (int i = 0; i < len - parm_variadic_p; ++i)
	if (unify (tparms, targs,
		   TREE_VEC_ELT (parmvec, i),
		   TREE_VEC_ELT (argvec, i),
		   UNIFY_ALLOW_NONE, explain_p))
	  return 1;

      /* Any remaining arguments deduce against the trailing pack.  */
      if (parm_variadic_p
	  && unify_pack_expansion (tparms, targs,
				   parmvec, argvec,
				   DEDUCE_EXACT,
				   /*subr=*/true, explain_p))
	return 1;
    }

  return 0;
}

/* Return 1 if PARM_PARMS and ARG_PARMS matches using rule for
   template template parameters.  Both PARM_PARMS and ARG_PARMS are
   vectors of TREE_LIST nodes containing TYPE_DECL, TEMPLATE_DECL
   or PARM_DECL.

   Consider the example:
     template <class T> class A;
     template<template <class U> class TT> class B;

   For B<A>, PARM_PARMS are the parameters to TT, while ARG_PARMS are
   the parameters to A, and OUTER_ARGS contains A.
*/

static int
coerce_template_template_parms (tree parm_parms,
				tree arg_parms,
				tsubst_flags_t complain,
				tree in_decl,
				tree outer_args)
{
  int nparms, nargs, i;
  tree parm, arg;
  int variadic_p = 0;

  gcc_assert (TREE_CODE (parm_parms) == TREE_VEC);
  gcc_assert (TREE_CODE (arg_parms) == TREE_VEC);

  nparms = TREE_VEC_LENGTH (parm_parms);
  nargs = TREE_VEC_LENGTH (arg_parms);

  if (flag_new_ttp)
    {
      /* P0522R0: A template template-parameter P is at least as specialized
	 as a template template-argument A if, given the following rewrite
	 to two function templates, the function template corresponding to
	 P is at least as specialized as the function template corresponding
	 to A according to the partial ordering rules for function templates
	 ([temp.func.order]).  Given an invented class template X with the
	 template parameter list of A (including default arguments):

	 * Each of the two function templates has the same template
	 parameters, respectively, as P or A.

	 * Each function template has a single function parameter whose type
	 is a specialization of X with template arguments corresponding to
	 the template parameters from the respective function template where,
	 for each template parameter PP in the template parameter list of
	 the function template, a corresponding template argument AA is
	 formed.  If PP declares a parameter pack, then AA is the pack
	 expansion PP... ([temp.variadic]); otherwise, AA is the
	 id-expression PP.

	 If the rewrite produces an invalid type, then P is not at least as
	 specialized as A.  */

      /* So coerce P's args to apply to A's parms, and then deduce between
	 A's args and the converted args.  If that succeeds, A is at least
	 as specialized as P, so they match.  */
      tree pargs = template_parms_level_to_args (parm_parms);
      ++processing_template_decl;
      pargs = coerce_template_parms (arg_parms, pargs, NULL_TREE, tf_none,
				     /*require_all*/true,
				     /*use_default*/true);
      --processing_template_decl;
      if (pargs != error_mark_node)
	{
	  tree targs = make_tree_vec (nargs);
	  tree aargs = template_parms_level_to_args (arg_parms);
	  if (!unify (arg_parms, targs, aargs, pargs, UNIFY_ALLOW_NONE,
		      /*explain*/false))
	    return 1;
	}
      /* Otherwise fall through to the pre-P0522 exact-match check.  */
    }

  /* Determine whether we have a parameter pack at the end of the
     template template parameter's template parameter list.  */
  if (TREE_VEC_ELT (parm_parms, nparms - 1) != error_mark_node)
    {
      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, nparms - 1));

      if (error_operand_p (parm))
	return 0;

      switch (TREE_CODE (parm))
	{
	case TEMPLATE_DECL:
	case TYPE_DECL:
	  if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	    variadic_p = 1;
	  break;

	case PARM_DECL:
	  if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	    variadic_p = 1;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (nargs != nparms
      && !(variadic_p && nargs >= nparms - 1))
    return 0;

  /* Check all of the template parameters except the parameter pack at
     the end (if any).  */
  for (i = 0; i < nparms - variadic_p; ++i)
    {
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node
	  || TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	continue;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));
      arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

      if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					  outer_args))
	return 0;
    }

  if (variadic_p)
    {
      /* Check each of the template parameters in the template
	 argument against the template parameter pack at the end of
	 the template template parameter.  Note that I is left at the
	 pack's index by the loop above.  */
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node)
	return 0;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));

      for (; i < nargs; ++i)
	{
	  if (TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	    continue;

	  arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

	  if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					      outer_args))
	    return 0;
	}
    }

  return 1;
}

/* Verifies that the deduced template arguments (in TARGS) for the
   template template parameters (in TPARMS) represent valid bindings,
   by comparing the template parameter list of each template argument
   to the template parameter list of its corresponding template
   template parameter, in accordance with DR150.  This
   routine can only be called after all template arguments have been
   deduced.  It will return TRUE if all of the template template
   parameter bindings are okay, FALSE otherwise.  */

bool
template_template_parm_bindings_ok_p (tree tparms, tree targs)
{
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  bool ret = true;

  /* We're dealing with template parms in this process.  */
  ++processing_template_decl;

  targs = INNERMOST_TEMPLATE_ARGS (targs);

  for (i = 0; i < ntparms; ++i)
    {
      tree tparm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
      tree targ = TREE_VEC_ELT (targs, i);

      if (TREE_CODE (tparm) == TEMPLATE_DECL && targ)
	{
	  tree packed_args = NULL_TREE;
	  int idx, len = 1;

	  if (ARGUMENT_PACK_P (targ))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (targ);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  for (idx = 0; idx < len; ++idx)
	    {
	      tree targ_parms = NULL_TREE;

	      if (packed_args)
		/* Extract the next argument from the argument
		   pack.  */
		targ = TREE_VEC_ELT (packed_args, idx);

	      if (PACK_EXPANSION_P (targ))
		/* Look at the pattern of the pack expansion.  */
		targ = PACK_EXPANSION_PATTERN (targ);

	      /* Extract the template parameters from the template
		 argument.
*/
	      if (TREE_CODE (targ) == TEMPLATE_DECL)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (targ);
	      else if (TREE_CODE (targ) == TEMPLATE_TEMPLATE_PARM)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (TYPE_NAME (targ));

	      /* Verify that we can coerce the template template
		 parameters from the template argument to the template
		 parameter.  This requires an exact match.  */
	      if (targ_parms
		  && !coerce_template_template_parms
			(DECL_INNERMOST_TEMPLATE_PARMS (tparm),
			 targ_parms,
			 tf_none,
			 tparm,
			 targs))
		{
		  ret = false;
		  goto out;
		}
	    }
	}
    }

 out:

  --processing_template_decl;
  return ret;
}

/* Since type attributes aren't mangled, we need to strip them from
   template type arguments.  */

static tree
canonicalize_type_argument (tree arg, tsubst_flags_t complain)
{
  if (!arg || arg == error_mark_node || arg == TYPE_CANONICAL (arg))
    return arg;
  bool removed_attributes = false;
  tree canon = strip_typedefs (arg, &removed_attributes);
  if (removed_attributes
      && (complain & tf_warning))
    warning (OPT_Wignored_attributes,
	     "ignoring attributes on template argument %qT", arg);
  return canon;
}

/* And from inside dependent non-type arguments like sizeof(Type).  */

static tree
canonicalize_expr_argument (tree arg, tsubst_flags_t complain)
{
  if (!arg || arg == error_mark_node)
    return arg;
  bool removed_attributes = false;
  tree canon = strip_typedefs_expr (arg, &removed_attributes);
  if (removed_attributes
      && (complain & tf_warning))
    warning (OPT_Wignored_attributes,
	     "ignoring attributes in template argument %qE", arg);
  return canon;
}

// A template declaration can be substituted for a constrained
// template template parameter only when the argument is more
// constrained than the parameter.
static bool
is_compatible_template_arg (tree parm, tree arg)
{
  tree parm_cons = get_constraints (parm);

  /* For now, allow constrained template template arguments
     and unconstrained template template parameters.  */
  if (parm_cons == NULL_TREE)
    return true;

  tree arg_cons = get_constraints (arg);

  // If the template parameter is constrained, we need to rewrite its
  // constraints in terms of the ARG's template parameters. This ensures
  // that all of the template parameter types will have the same depth.
  //
  // Note that this is only valid when coerce_template_template_parm is
  // true for the innermost template parameters of PARM and ARG. In other
  // words, because coercion is successful, this conversion will be valid.
  if (parm_cons)
    {
      tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (arg));
      parm_cons = tsubst_constraint_info (parm_cons,
					  INNERMOST_TEMPLATE_ARGS (args),
					  tf_none, NULL_TREE);
      if (parm_cons == error_mark_node)
	return false;
    }

  return subsumes (parm_cons, arg_cons);
}

// Convert a placeholder argument into a binding to the original
// parameter. The original parameter is saved as the TREE_TYPE of
// ARG.
static inline tree
convert_wildcard_argument (tree parm, tree arg)
{
  TREE_TYPE (arg) = parm;
  return arg;
}

/* We can't fully resolve ARG given as a non-type template argument to TYPE,
   because one of them is dependent.  But we need to represent the
   conversion for the benefit of cp_tree_equal.  */

static tree
maybe_convert_nontype_argument (tree type, tree arg)
{
  /* Auto parms get no conversion.  */
  if (type_uses_auto (type))
    return arg;
  /* We don't need or want to add this conversion now if we're going to use
     the argument for deduction.  */
  if (value_dependent_expression_p (arg))
    return arg;

  type = cv_unqualified (type);
  tree argtype = TREE_TYPE (arg);
  if (same_type_p (type, argtype))
    return arg;

  /* Wrap ARG in an IMPLICIT_CONV_EXPR recording the target type.  */
  arg = build1 (IMPLICIT_CONV_EXPR, type, arg);
  IMPLICIT_CONV_EXPR_NONTYPE_ARG (arg) = true;
  return arg;
}

/* Convert the indicated template ARG as necessary to match the
   indicated template PARM.  Returns the converted ARG, or
   error_mark_node if the conversion was unsuccessful.  Error and
   warning messages are issued under control of COMPLAIN.
   This conversion is for the Ith parameter in the parameter list.  ARGS
   is the full set of template arguments deduced so far.  */

static tree
convert_template_argument (tree parm,
			   tree arg,
			   tree args,
			   tsubst_flags_t complain,
			   int i,
			   tree in_decl)
{
  tree orig_arg;
  tree val;
  int is_type, requires_type, is_tmpl_type, requires_tmpl_type;

  if (parm == error_mark_node)
    return error_mark_node;

  /* Trivially convert placeholders.  */
  if (TREE_CODE (arg) == WILDCARD_DECL)
    return convert_wildcard_argument (parm, arg);

  if (arg == any_targ_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST
      && TREE_CODE (TREE_VALUE (arg)) == OFFSET_REF)
    {
      /* The template argument was the name of some
	 member function.  That's usually
	 invalid, but static members are OK.  In any
	 case, grab the underlying fields/functions
	 and issue an error later if required.  */
      orig_arg = TREE_VALUE (arg);
      TREE_TYPE (arg) = unknown_type_node;
    }

  orig_arg = arg;

  requires_tmpl_type = TREE_CODE (parm) == TEMPLATE_DECL;
  requires_type = (TREE_CODE (parm) == TYPE_DECL
		   || requires_tmpl_type);

  /* When determining whether an argument pack expansion is a template,
     look at the pattern.  */
  if (TREE_CODE (arg) == TYPE_PACK_EXPANSION)
    arg = PACK_EXPANSION_PATTERN (arg);

  /* Deal with an injected-class-name used as a template template arg.  */
  if (requires_tmpl_type && CLASS_TYPE_P (arg))
    {
      tree t = maybe_get_template_decl_from_type_decl (TYPE_NAME (arg));
      if (TREE_CODE (t) == TEMPLATE_DECL)
	{
	  if (cxx_dialect >= cxx11)
	    /* OK under DR 1004.  */;
	  else if (complain & tf_warning_or_error)
	    pedwarn (input_location, OPT_Wpedantic, "injected-class-name %qD"
		     " used as template template argument", TYPE_NAME (arg));
	  else if (flag_pedantic_errors)
	    t = arg;

	  arg = t;
	}
    }

  is_tmpl_type =
    ((TREE_CODE (arg) == TEMPLATE_DECL
      && TREE_CODE (DECL_TEMPLATE_RESULT (arg)) == TYPE_DECL)
     || (requires_tmpl_type && TREE_CODE (arg) == TYPE_ARGUMENT_PACK)
     || TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
     || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);

  if (is_tmpl_type
      && (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
	  || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE))
    arg = TYPE_STUB_DECL (arg);

  is_type = TYPE_P (arg) || is_tmpl_type;

  if (requires_type && ! is_type && TREE_CODE (arg) == SCOPE_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_TYPE_PARM)
    {
      if (TREE_CODE (TREE_OPERAND (arg, 1)) == BIT_NOT_EXPR)
	{
	  if (complain & tf_error)
	    error ("invalid use of destructor %qE as a type", orig_arg);
	  return error_mark_node;
	}

      permerror (input_location,
		 "to refer to a type member of a template parameter, "
		 "use %<typename %E%>", orig_arg);

      orig_arg = make_typename_type (TREE_OPERAND (arg, 0),
				     TREE_OPERAND (arg, 1),
				     typename_type,
				     complain);
      arg = orig_arg;
      is_type = 1;
    }
  if (is_type != requires_type)
    {
      if (in_decl)
	{
	  if (complain & tf_error)
	    {
	      error ("type/value mismatch at argument %d in template "
		     "parameter list for %qD",
		     i + 1, in_decl);
	      if (is_type)
		inform (input_location,
			"  expected a constant of type %qT, got %qT",
			TREE_TYPE (parm),
			(DECL_P (arg) ? DECL_NAME (arg) : orig_arg));
	      else if (requires_tmpl_type)
		inform (input_location,
			"  expected a class template, got %qE", orig_arg);
	      else
		inform (input_location,
			"  expected a type, got %qE", orig_arg);
	    }
	}
      return error_mark_node;
    }
  if (is_tmpl_type ^ requires_tmpl_type)
    {
      if (in_decl && (complain & tf_error))
	{
	  error ("type/value mismatch at argument %d in template "
		 "parameter list for %qD",
		 i + 1, in_decl);
	  if (is_tmpl_type)
	    inform (input_location,
		    "  expected a type, got %qT", DECL_NAME (arg));
	  else
	    inform (input_location,
		    "  expected a class template, got %qT", orig_arg);
	}
      return error_mark_node;
    }

  if (template_parameter_pack_p (parm) && ARGUMENT_PACK_P (orig_arg))
    /* We already did the appropriate conversion when packing args.  */
    val = orig_arg;
  else if (is_type)
    {
      if (requires_tmpl_type)
	{
	  if (TREE_CODE (TREE_TYPE (arg)) == UNBOUND_CLASS_TEMPLATE)
	    /* The number of argument required is not known yet.
	       Just accept it for now.  */
	    val = orig_arg;
	  else
	    {
	      tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	      tree argparm;

	      /* Strip alias templates that are equivalent to another
		 template.  */
	      arg = get_underlying_template (arg);
	      argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	      if (coerce_template_template_parms (parmparm, argparm,
						  complain, in_decl,
						  args))
		{
		  val = arg;

		  /* TEMPLATE_TEMPLATE_PARM node is preferred over
		     TEMPLATE_DECL.  */
		  if (val != error_mark_node)
		    {
		      if (DECL_TEMPLATE_TEMPLATE_PARM_P (val))
			val = TREE_TYPE (val);
		      if (TREE_CODE (orig_arg) == TYPE_PACK_EXPANSION)
			val = make_pack_expansion (val, complain);
		    }
		}
	      else
		{
		  if (in_decl && (complain & tf_error))
		    {
		      error ("type/value mismatch at argument %d in "
			     "template parameter list for %qD",
			     i + 1, in_decl);
		      inform (input_location,
			      "  expected a template of type %qD, got %qT",
			      parm, orig_arg);
		    }

		  val = error_mark_node;
		}

	      // Check that the constraints are compatible before allowing the
	      // substitution.
	      if (val != error_mark_node)
		if (!is_compatible_template_arg (parm, arg))
		  {
		    if (in_decl && (complain & tf_error))
		      {
			error ("constraint mismatch at argument %d in "
			       "template parameter list for %qD",
			       i + 1, in_decl);
			inform (input_location, "  expected %qD but got %qD",
				parm, arg);
		      }
		    val = error_mark_node;
		  }
	    }
	}
      else
	val = orig_arg;
      /* We only form one instance of each template specialization.
	 Therefore, if we use a non-canonical variant (i.e., a
	 typedef), any future messages referring to the type will use
	 the typedef, which is confusing if those future uses do not
	 themselves also use the typedef.  */
      if (TYPE_P (val))
	val = canonicalize_type_argument (val, complain);
    }
  else
    {
      tree t = TREE_TYPE (parm);

      if (TEMPLATE_PARM_LEVEL (get_template_parm_index (parm))
	  > TMPL_ARGS_DEPTH (args))
	/* We don't have enough levels of args to do any substitution.  This
	   can happen in the context of -fnew-ttp-matching.  */;
      else if (tree a = type_uses_auto (t))
	{
	  t = do_auto_deduction (t, arg, a, complain, adc_unify, args);
	  if (t == error_mark_node)
	    return error_mark_node;
	}
      else
	t = tsubst (t, args, complain, in_decl);

      if (invalid_nontype_parm_type_p (t, complain))
	return error_mark_node;

      if (!type_dependent_expression_p (orig_arg)
	  && !uses_template_parms (t))
	/* We used to call digest_init here.  However, digest_init
	   will report errors, which we don't want when complain
	   is zero.  More importantly, digest_init will try too
	   hard to convert things: for example, `0' should not be
	   converted to pointer type at this point according to
	   the standard.  Accepting this is not merely an
	   extension, since deciding whether or not these
	   conversions can occur is part of determining which
	   function template to call, or whether a given explicit
	   argument specification is valid.  */
	val = convert_nontype_argument (t, orig_arg, complain);
      else
	{
	  val = canonicalize_expr_argument (orig_arg, complain);
	  val = maybe_convert_nontype_argument (t, val);
	}


      if (val == NULL_TREE)
	val = error_mark_node;
      else if (val == error_mark_node && (complain & tf_error))
	error ("could not convert template argument %qE from %qT to %qT",
	       orig_arg, TREE_TYPE (orig_arg), t);

      if (INDIRECT_REF_P (val))
	{
	  /* Reject template arguments that are references to built-in
	     functions with no library fallbacks.  */
	  const_tree inner = TREE_OPERAND (val, 0);
	  const_tree innertype = TREE_TYPE (inner);
	  if (innertype
	      && TREE_CODE (innertype) == REFERENCE_TYPE
	      && TREE_CODE (TREE_TYPE (innertype)) == FUNCTION_TYPE
	      && TREE_OPERAND_LENGTH (inner) > 0
	      && reject_gcc_builtin (TREE_OPERAND (inner, 0)))
	    return error_mark_node;
	}

      if (TREE_CODE (val) == SCOPE_REF)
	{
	  /* Strip typedefs from the SCOPE_REF.  */
	  tree type = canonicalize_type_argument (TREE_TYPE (val), complain);
	  tree scope = canonicalize_type_argument (TREE_OPERAND (val, 0),
						   complain);
	  val = build_qualified_name (type, scope, TREE_OPERAND (val, 1),
				      QUALIFIED_NAME_IS_TEMPLATE (val));
	}
    }

  return val;
}

/* Coerces the remaining template arguments in INNER_ARGS (from
   ARG_IDX to the end) into the parameter pack at PARM_IDX in PARMS.
   Returns the coerced argument pack.  PARM_IDX is the position of this
   parameter in the template parameter list.  ARGS is the original
   template argument list.  */

static tree
coerce_template_parameter_pack (tree parms,
				int parm_idx,
				tree args,
				tree inner_args,
				int arg_idx,
				tree new_args,
				int* lost,
				tree in_decl,
				tsubst_flags_t complain)
{
  tree parm = TREE_VEC_ELT (parms, parm_idx);
  int nargs = inner_args ?
    NUM_TMPL_ARGS (inner_args) : 0;
  tree packed_args;
  tree argument_pack;
  tree packed_parms = NULL_TREE;

  if (arg_idx > nargs)
    arg_idx = nargs;

  if (tree packs = fixed_parameter_pack_p (TREE_VALUE (parm)))
    {
      /* When the template parameter is a non-type template parameter pack
	 or template template parameter pack whose type or template
	 parameters use parameter packs, we know exactly how many arguments
	 we are looking for.  Build a vector of the instantiated decls for
	 these template parameters in PACKED_PARMS.  */
      /* We can't use make_pack_expansion here because it would interpret a
	 _DECL as a use rather than a declaration.  */
      tree decl = TREE_VALUE (parm);
      tree exp = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (exp, decl);
      PACK_EXPANSION_PARAMETER_PACKS (exp) = packs;
      SET_TYPE_STRUCTURAL_EQUALITY (exp);

      /* Temporarily hide the pack's own slot from the substitution.  */
      TREE_VEC_LENGTH (args)--;
      packed_parms = tsubst_pack_expansion (exp, args, complain, decl);
      TREE_VEC_LENGTH (args)++;

      if (packed_parms == error_mark_node)
	return error_mark_node;

      /* If we're doing a partial instantiation of a member template,
	 verify that all of the types used for the non-type template
	 parameter pack are, in fact, valid for non-type template
	 parameters.  */
      if (arg_idx < nargs
	  && PACK_EXPANSION_P (TREE_VEC_ELT (inner_args, arg_idx)))
	{
	  int j, len = TREE_VEC_LENGTH (packed_parms);
	  for (j = 0; j < len; ++j)
	    {
	      tree t = TREE_TYPE (TREE_VEC_ELT (packed_parms, j));
	      if (invalid_nontype_parm_type_p (t, complain))
		return error_mark_node;
	    }
	  /* We don't know how many args we have yet, just
	     use the unconverted ones for now.  */
	  return NULL_TREE;
	}

      packed_args = make_tree_vec (TREE_VEC_LENGTH (packed_parms));
    }
  /* Check if we have a placeholder pack, which indicates we're
     in the context of a introduction list.  In that case we want
     to match this pack to the single placeholder.  */
  else if (arg_idx < nargs
	   && TREE_CODE (TREE_VEC_ELT (inner_args, arg_idx)) == WILDCARD_DECL
	   && WILDCARD_PACK_P (TREE_VEC_ELT (inner_args, arg_idx)))
    {
      nargs = arg_idx + 1;
      packed_args = make_tree_vec (1);
    }
  else
    packed_args = make_tree_vec (nargs - arg_idx);

  /* Convert the remaining arguments, which will be a part of the
     parameter pack "parm".  */
  int first_pack_arg = arg_idx;
  for (; arg_idx < nargs; ++arg_idx)
    {
      tree arg = TREE_VEC_ELT (inner_args, arg_idx);
      tree actual_parm = TREE_VALUE (parm);
      int pack_idx = arg_idx - first_pack_arg;

      if (packed_parms)
	{
	  /* Once we've packed as many args as we have types, stop.  */
	  if (pack_idx >= TREE_VEC_LENGTH (packed_parms))
	    break;
	  else if (PACK_EXPANSION_P (arg))
	    /* We don't know how many args we have yet, just
	       use the unconverted ones for now.  */
	    return NULL_TREE;
	  else
	    actual_parm = TREE_VEC_ELT (packed_parms, pack_idx);
	}

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else
	arg = convert_template_argument (actual_parm,
					 arg, new_args, complain, parm_idx,
					 in_decl);
      if (arg == error_mark_node)
	(*lost)++;
      TREE_VEC_ELT (packed_args, pack_idx) = arg;
    }

  if (arg_idx - first_pack_arg < TREE_VEC_LENGTH (packed_args)
      && TREE_VEC_LENGTH (packed_args) > 0)
    {
      if (complain & tf_error)
	error ("wrong number of template arguments (%d, should be %d)",
	       arg_idx - first_pack_arg, TREE_VEC_LENGTH (packed_args));
      return error_mark_node;
    }

  if (TREE_CODE (TREE_VALUE (parm)) == TYPE_DECL
      || TREE_CODE (TREE_VALUE (parm)) == TEMPLATE_DECL)
    argument_pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      argument_pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_CONSTANT (argument_pack) = 1;
    }

  SET_ARGUMENT_PACK_ARGS (argument_pack, packed_args);
  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (packed_args,
					 TREE_VEC_LENGTH (packed_args));
  return argument_pack;
}

/* Returns the number of pack expansions in the template argument vector
   ARGS.
*/

static int
pack_expansion_args_count (tree args)
{
  int i;
  int count = 0;
  if (args)
    for (i = 0; i < TREE_VEC_LENGTH (args); ++i)
      {
	tree elt = TREE_VEC_ELT (args, i);
	if (elt && PACK_EXPANSION_P (elt))
	  ++count;
      }
  return count;
}

/* Convert all template arguments to their appropriate types, and
   return a vector containing the innermost resulting template
   arguments.  If any error occurs, return error_mark_node.  Error and
   warning messages are issued under control of COMPLAIN.

   If REQUIRE_ALL_ARGS is false, argument deduction will be performed
   for arguments not specified in ARGS.  Otherwise, if
   USE_DEFAULT_ARGS is true, default arguments will be used to fill in
   unspecified arguments.  If REQUIRE_ALL_ARGS is true, but
   USE_DEFAULT_ARGS is false, then all arguments must be specified in
   ARGS.  */

static tree
coerce_template_parms (tree parms,
		       tree args,
		       tree in_decl,
		       tsubst_flags_t complain,
		       bool require_all_args,
		       bool use_default_args)
{
  int nparms, nargs, parm_idx, arg_idx, lost = 0;
  tree orig_inner_args;
  tree inner_args;
  tree new_args;
  tree new_inner_args;
  int saved_unevaluated_operand;
  int saved_inhibit_evaluation_warnings;

  /* When used as a boolean value, indicates whether this is a
     variadic template parameter list.  Since it's an int, we can also
     subtract it from nparms to get the number of non-variadic
     parameters.  */
  int variadic_p = 0;
  int variadic_args_p = 0;
  int post_variadic_parms = 0;

  /* Adjustment to nparms for fixed parameter packs.  */
  int fixed_pack_adjust = 0;
  int fixed_packs = 0;
  int missing = 0;

  /* Likewise for parameters with default arguments.  */
  int default_p = 0;

  if (args == error_mark_node)
    return error_mark_node;

  nparms = TREE_VEC_LENGTH (parms);

  /* Determine if there are any parameter packs or default arguments.  */
  for (parm_idx = 0; parm_idx < nparms; ++parm_idx)
    {
      tree parm = TREE_VEC_ELT (parms, parm_idx);
      if (variadic_p)
	++post_variadic_parms;
      if (template_parameter_pack_p (TREE_VALUE (parm)))
	++variadic_p;
      if (TREE_PURPOSE (parm))
	++default_p;
    }

  inner_args = orig_inner_args = INNERMOST_TEMPLATE_ARGS (args);
  /* If there are no parameters that follow a parameter pack, we need to
     expand any argument packs so that we can deduce a parameter pack from
     some non-packed args followed by an argument pack, as in variadic85.C.
     If there are such parameters, we need to leave argument packs intact
     so the arguments are assigned properly.  This can happen when dealing
     with a nested class inside a partial specialization of a class
     template, as in variadic92.C, or when deducing a template parameter
     pack from a sub-declarator, as in variadic114.C.  */
  if (!post_variadic_parms)
    inner_args = expand_template_argument_pack (inner_args);

  /* Count any pack expansion args.  */
  variadic_args_p = pack_expansion_args_count (inner_args);

  nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0;
  if ((nargs - variadic_args_p > nparms && !variadic_p)
      || (nargs < nparms - variadic_p
	  && require_all_args
	  && !variadic_args_p
	  && (!use_default_args
	      || (TREE_VEC_ELT (parms, nargs) != error_mark_node
		  && !TREE_PURPOSE (TREE_VEC_ELT (parms, nargs))))))
    {
    bad_nargs:
      if (complain & tf_error)
	{
	  if (variadic_p || default_p)
	    {
	      nparms -= variadic_p + default_p;
	      error ("wrong number of template arguments "
		     "(%d, should be at least %d)", nargs, nparms);
	    }
	  else
	     error ("wrong number of template arguments "
		    "(%d, should be %d)", nargs, nparms);

	  if (in_decl)
	    inform (DECL_SOURCE_LOCATION (in_decl),
		    "provided for %qD", in_decl);
	}

      return error_mark_node;
    }
  /* We can't pass a pack expansion to a non-pack parameter of an alias
     template (DR 1430).  */
  else if (in_decl
	   && (DECL_ALIAS_TEMPLATE_P (in_decl)
	       || concept_template_p (in_decl))
	   && variadic_args_p
	   && nargs - variadic_args_p < nparms - variadic_p)
    {
      if (complain & tf_error)
	{
	  for (int i = 0; i < TREE_VEC_LENGTH (inner_args); ++i)
	    {
	      tree arg = TREE_VEC_ELT (inner_args, i);
	      tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));

	      if (PACK_EXPANSION_P (arg)
		  && !template_parameter_pack_p (parm))
		{
		  if (DECL_ALIAS_TEMPLATE_P (in_decl))
		    error_at (location_of (arg),
			      "pack expansion argument for non-pack parameter "
			      "%qD of alias template %qD", parm, in_decl);
		  else
		    error_at (location_of (arg),
			      "pack expansion argument for non-pack parameter "
			      "%qD of concept %qD", parm, in_decl);
		  inform (DECL_SOURCE_LOCATION (parm), "declared here");
		  goto found;
		}
	    }
	  gcc_unreachable ();
	found:;
	}
      return error_mark_node;
    }

  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  */
  saved_unevaluated_operand = cp_unevaluated_operand;
  cp_unevaluated_operand = 0;
  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  c_inhibit_evaluation_warnings = 0;
  new_inner_args = make_tree_vec (nparms);
  new_args = add_outermost_template_args (args, new_inner_args);
  int pack_adjust = 0;
  for (parm_idx = 0, arg_idx = 0; parm_idx < nparms; parm_idx++, arg_idx++)
    {
      tree arg;
      tree parm;

      /* Get the Ith template parameter.  */
      parm = TREE_VEC_ELT (parms, parm_idx);

      if (parm == error_mark_node)
	{
	  TREE_VEC_ELT (new_inner_args, arg_idx) = error_mark_node;
	  continue;
	}

      /* Calculate the next argument.  */
      if (arg_idx < nargs)
	arg = TREE_VEC_ELT (inner_args, arg_idx);
      else
	arg = NULL_TREE;

      if (template_parameter_pack_p (TREE_VALUE (parm))
	  && !(arg && ARGUMENT_PACK_P (arg)))
	{
	  /* Some arguments will be placed in the
	     template parameter pack PARM.  */
	  arg = coerce_template_parameter_pack (parms, parm_idx, args,
						inner_args, arg_idx,
						new_args, &lost,
						in_decl, complain);

	  if (arg == NULL_TREE)
	    {
	      /* We don't know how many args we have yet, just use the
		 unconverted (and still packed) ones for now.  */
	      new_inner_args = orig_inner_args;
	      arg_idx = nargs;
	      break;
	    }

	  TREE_VEC_ELT (new_inner_args, parm_idx) = arg;

	  /* Store this argument.  */
	  if (arg == error_mark_node)
	    {
	      lost++;
	      /* We are done with all of the arguments.  */
	      arg_idx = nargs;
	      break;
	    }
	  else
	    {
	      pack_adjust = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg)) - 1;
	      arg_idx += pack_adjust;
	      if (fixed_parameter_pack_p (TREE_VALUE (parm)))
		{
		  ++fixed_packs;
		  fixed_pack_adjust += pack_adjust;
		}
	    }

	  continue;
	}
      else if (arg)
	{
	  if (PACK_EXPANSION_P (arg))
	    {
	      /* "If every valid specialization of a variadic template
		 requires an empty template parameter pack, the template is
		 ill-formed, no diagnostic required."  So check that the
		 pattern works with this parameter.  */
	      tree pattern = PACK_EXPANSION_PATTERN (arg);
	      tree conv = convert_template_argument (TREE_VALUE (parm),
						     pattern, new_args,
						     complain, parm_idx,
						     in_decl);
	      if (conv == error_mark_node)
		{
		  if (complain & tf_error)
		    inform (input_location, "so any instantiation with a "
			    "non-empty parameter pack would be ill-formed");
		  ++lost;
		}
	      else if (TYPE_P (conv) && !TYPE_P (pattern))
		/* Recover from missing typename.  */
		TREE_VEC_ELT (inner_args, arg_idx)
		  = make_pack_expansion (conv, complain);

	      /* We don't know how many args we have yet, just
		 use the unconverted ones for now.  */
	      new_inner_args = inner_args;
	      arg_idx = nargs;
	      break;
	    }
	}
      else if (require_all_args)
	{
	  /* There must be a default arg in this case.  */
	  arg = tsubst_template_arg (TREE_PURPOSE (parm), new_args,
				     complain, in_decl);
	  /* The position of the first default template argument,
	     is also the number of non-defaulted arguments in NEW_INNER_ARGS.
	     Record that.  */
	  if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
						 arg_idx - pack_adjust);
	}
      else
	break;

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else if (!arg)
	{
	  /* This can occur if there was an error in the template
	     parameter list itself (which we would already have
	     reported) that we are trying to recover from, e.g., a class
	     template with a parameter list such as
	     template<typename..., typename> (cpp0x/variadic150.C).  */
	  ++lost;

	  /* This can also happen with a fixed parameter pack (71834).  */
	  if (arg_idx >= nargs)
	    ++missing;
	}
      else
	arg = convert_template_argument (TREE_VALUE (parm),
					 arg, new_args, complain,
					 parm_idx, in_decl);

      if (arg == error_mark_node)
	lost++;

      TREE_VEC_ELT (new_inner_args, arg_idx - pack_adjust) = arg;
    }
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

  if (missing || arg_idx < nargs - variadic_args_p)
    {
      /* If we had fixed parameter packs, we didn't know how many
	 arguments we actually needed earlier; now we do.  */
      nparms += fixed_pack_adjust;
      variadic_p -= fixed_packs;
      goto bad_nargs;
    }

  if (arg_idx < nargs)
    {
      /* We had some pack expansion arguments that will only work if the packs
	 are empty, but wait until instantiation time to complain.
	 See variadic-ttp3.C.
	 */
      /* Keep the NPARMS coerced arguments, then append the remaining
	 (still unconverted) trailing pack-expansion arguments so they can
	 be rechecked at instantiation time.  */
      int len = nparms + (nargs - arg_idx);
      tree args = make_tree_vec (len);
      int i = 0;
      for (; i < nparms; ++i)
	TREE_VEC_ELT (args, i) = TREE_VEC_ELT (new_inner_args, i);
      for (; i < len; ++i, ++arg_idx)
	TREE_VEC_ELT (args, i) = TREE_VEC_ELT (inner_args,
					       arg_idx - pack_adjust);
      new_inner_args = args;
    }

  if (lost)
    {
      /* Errors were already reported (or suppressed under tf_none).  */
      gcc_assert (!(complain & tf_error) || seen_error ());
      return error_mark_node;
    }

  /* Checking-only bookkeeping: if no default argument was used, record
     that every slot in NEW_INNER_ARGS was explicitly provided.  */
  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
					 TREE_VEC_LENGTH (new_inner_args));

  return new_inner_args;
}

/* Convert all template arguments to their appropriate types, and
   return a vector containing the innermost resulting template
   arguments.  If any error occurs, return error_mark_node.  Error and
   warning messages are not issued.

   Note that no function argument deduction is performed, and default
   arguments are used to fill in unspecified arguments.  */

tree
coerce_template_parms (tree parms, tree args, tree in_decl)
{
  /* tf_none: diagnostics suppressed; defaults instantiated as needed.  */
  return coerce_template_parms (parms, args, in_decl, tf_none, true, true);
}

/* Convert all template arguments to their appropriate type, and
   instantiate default arguments as needed.  This returns a vector
   containing the innermost resulting template arguments, or
   error_mark_node if unsuccessful.  */

tree
coerce_template_parms (tree parms, tree args, tree in_decl,
		       tsubst_flags_t complain)
{
  return coerce_template_parms (parms, args, in_decl, complain, true, true);
}

/* Like coerce_template_parms.  If PARMS represents all template
   parameters levels, this function returns a vector of vectors
   representing all the resulting argument levels.  Note that in this
   case, only the innermost arguments are coerced because the
   outermost ones are supposed to have been coerced already.

   Otherwise, if PARMS represents only (the innermost) vector of
   parameters, this function returns a vector containing just the
   innermost resulting arguments.
 */

static tree
coerce_innermost_template_parms (tree parms,
				 tree args,
				 tree in_decl,
				 tsubst_flags_t complain,
				 bool require_all_args,
				 bool use_default_args)
{
  int parms_depth = TMPL_PARMS_DEPTH (parms);
  int args_depth = TMPL_ARGS_DEPTH (args);
  tree coerced_args;

  if (parms_depth > 1)
    {
      /* Multi-level case: build one coerced vector per parameter level.
	 Only the level whose depth matches ARGS_DEPTH (the innermost
	 supplied arguments) is actually coerced; outer levels are copied
	 through as already coerced.  */
      coerced_args = make_tree_vec (parms_depth);
      tree level;
      int cur_depth;

      for (level = parms, cur_depth = parms_depth;
	   parms_depth > 0 && level != NULL_TREE;
	   level = TREE_CHAIN (level), --cur_depth)
	{
	  tree l;
	  if (cur_depth == args_depth)
	    l = coerce_template_parms (TREE_VALUE (level),
				       args, in_decl, complain,
				       require_all_args,
				       use_default_args);
	  else
	    l = TMPL_ARGS_LEVEL (args, cur_depth);

	  if (l == error_mark_node)
	    return error_mark_node;

	  SET_TMPL_ARGS_LEVEL (coerced_args, cur_depth, l);
	}
    }
  else
    /* Single-level case: coerce the innermost parameters directly.  */
    coerced_args = coerce_template_parms (INNERMOST_TEMPLATE_PARMS (parms),
					  args, in_decl, complain,
					  require_all_args,
					  use_default_args);
  return coerced_args;
}

/* Returns 1 if template args OT and NT are equivalent.  */

int
template_args_equal (tree ot, tree nt, bool partial_order /* = false */)
{
  if (nt == ot)
    return 1;
  if (nt == NULL_TREE || ot == NULL_TREE)
    return false;
  /* any_targ_node acts as a wildcard that matches anything.  */
  if (nt == any_targ_node || ot == any_targ_node)
    return true;

  if (TREE_CODE (nt) == TREE_VEC)
    /* For member templates */
    return TREE_CODE (ot) == TREE_VEC && comp_template_args (ot, nt);
  else if (PACK_EXPANSION_P (ot))
    /* Pack expansions are equal when both patterns and both sets of
       extra args agree.  */
    return (PACK_EXPANSION_P (nt)
	    && template_args_equal (PACK_EXPANSION_PATTERN (ot),
				    PACK_EXPANSION_PATTERN (nt))
	    && template_args_equal (PACK_EXPANSION_EXTRA_ARGS (ot),
				    PACK_EXPANSION_EXTRA_ARGS (nt)));
  else if (ARGUMENT_PACK_P (ot))
    {
      /* Argument packs: compare element-wise.  */
      int i, len;
      tree opack, npack;

      if (!ARGUMENT_PACK_P (nt))
	return 0;

      opack = ARGUMENT_PACK_ARGS (ot);
      npack = ARGUMENT_PACK_ARGS (nt);
      len = TREE_VEC_LENGTH (opack);
      if (TREE_VEC_LENGTH (npack) != len)
	return 0;
      for (i = 0; i < len; ++i)
	if (!template_args_equal (TREE_VEC_ELT (opack, i),
				  TREE_VEC_ELT (npack, i)))
	  return 0;
      return 1;
    }
  else if (ot && TREE_CODE (ot) == ARGUMENT_PACK_SELECT)
    gcc_unreachable ();
  else if (TYPE_P (nt))
    {
      if (!TYPE_P (ot))
	return false;
      /* Don't treat an alias template specialization with dependent
	 arguments as equivalent to its underlying type when used as a
	 template argument; we need them to be distinct so that we
	 substitute into the specialization arguments at instantiation
	 time.  And aliases can't be equivalent without being ==, so
	 we don't need to look any deeper.

	 During partial ordering, however, we need to treat them normally so
	 that we can order uses of the same alias with different
	 cv-qualification (79960).  */
      if (!partial_order
	  && (TYPE_ALIAS_P (nt) || TYPE_ALIAS_P (ot)))
	return false;
      else
	return same_type_p (ot, nt);
    }
  else if (TREE_CODE (ot) == TREE_VEC || TYPE_P (ot))
    /* Mixed kinds (one is a vec/type, the other is not): unequal.  */
    return 0;
  else
    {
      /* Try to treat a template non-type argument that has been converted
	 to the parameter type as equivalent to one that hasn't yet.  */
      for (enum tree_code code1 = TREE_CODE (ot);
	   CONVERT_EXPR_CODE_P (code1)
	     || code1 == NON_LVALUE_EXPR;
	   code1 = TREE_CODE (ot))
	ot = TREE_OPERAND (ot, 0);

      for (enum tree_code code2 = TREE_CODE (nt);
	   CONVERT_EXPR_CODE_P (code2)
	     || code2 == NON_LVALUE_EXPR;
	   code2 = TREE_CODE (nt))
	nt = TREE_OPERAND (nt, 0);

      return cp_tree_equal (ot, nt);
    }
}

/* Returns 1 iff the OLDARGS and NEWARGS are in fact identical sets of
   template arguments.  Returns 0 otherwise, and updates OLDARG_PTR and
   NEWARG_PTR with the offending arguments if they are non-NULL.  */

int
comp_template_args (tree oldargs, tree newargs,
		    tree *oldarg_ptr, tree *newarg_ptr,
		    bool partial_order)
{
  int i;

  if (oldargs == newargs)
    return 1;

  if (!oldargs || !newargs)
    return 0;

  if (TREE_VEC_LENGTH (oldargs) != TREE_VEC_LENGTH (newargs))
    return 0;

  for (i = 0; i < TREE_VEC_LENGTH (oldargs); ++i)
    {
      tree nt = TREE_VEC_ELT (newargs, i);
      tree ot = TREE_VEC_ELT (oldargs, i);

      if (!
	  template_args_equal (ot, nt, partial_order))
	{
	  /* Report the first mismatching pair to the caller, if asked.  */
	  if (oldarg_ptr != NULL)
	    *oldarg_ptr = ot;
	  if (newarg_ptr != NULL)
	    *newarg_ptr = nt;
	  return 0;
	}
    }
  return 1;
}

/* Compare template argument vectors using partial-ordering equivalence
   (aliases compared by underlying type; see template_args_equal).  */

inline bool
comp_template_args_porder (tree oargs, tree nargs)
{
  return comp_template_args (oargs, nargs, NULL, NULL, true);
}

/* Implement a freelist interface for objects of type T.

   Head is a separate object, rather than a regular member, so that we
   can define it as a GTY deletable pointer, which is highly
   desirable.  A data member could be declared that way, but then the
   containing object would implicitly get GTY((user)), which would
   prevent us from instantiating freelists as global objects.
   Although this way we can create freelist global objects, they're
   such thin wrappers that instantiating temporaries at every use
   loses nothing and saves permanent storage for the freelist object.

   Member functions next, anew, poison and reinit have default
   implementations that work for most of the types we're interested
   in, but if they don't work for some type, they should be explicitly
   specialized.  See the comments before them for requirements, and
   the example specializations for the tree_list_freelist.  */
template <typename T>
class freelist
{
  /* Return the next object in a chain.  We could just do type
     punning, but if we access the object with its underlying type, we
     avoid strict-aliasing trouble.  This needs only work between
     poison and reinit.  */
  static T *&next (T *obj) { return obj->next; }

  /* Return a newly allocated, uninitialized or minimally-initialized
     object of type T.  Any initialization performed by anew should
     either remain across the life of the object and the execution of
     poison, or be redone by reinit.  */
  static T *anew () { return ggc_alloc<T> (); }

  /* Optionally scribble all over the bits holding the object, so that
     they become (mostly?) uninitialized memory.  This is called while
     preparing to make the object part of the free list.
     */
  static void poison (T *obj) {
    T *p ATTRIBUTE_UNUSED = obj;
    T **q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
    /* Poison the data, to indicate the data is garbage.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, sizeof (*p)));
    memset (p, 0xa5, sizeof (*p));
#endif
    /* Let valgrind know the object is free.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, sizeof (*p)));

    /* Let valgrind know the next portion of the object is available,
       but uninitialized.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q)));
  }

  /* Bring an object that underwent at least one lifecycle after anew
     and before the most recent free and poison, back to a usable
     state, reinitializing whatever is needed for it to be
     functionally equivalent to an object just allocated and returned
     by anew.

     This may poison or clear the next field, used by freelist
     housekeeping after poison was called.  */
  static void reinit (T *obj) {
    T **q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
    memset (q, 0xa5, sizeof (*q));
#endif
    /* Let valgrind know the entire object is available, but
       uninitialized.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (*obj)));
  }

  /* Reference a GTY-deletable pointer that points to the first object
     in the free list proper.  */
  T *&head;

public:
  /* Construct a freelist object chaining objects off of HEAD.  */
  freelist (T *&head) : head(head) {}

  /* Add OBJ to the free object list.  The former head becomes OBJ's
     successor.  */
  void free (T *obj)
  {
    poison (obj);
    next (obj) = head;
    head = obj;
  }

  /* Take an object from the free list, if one is available, or
     allocate a new one.  Objects taken from the free list should be
     regarded as filled with garbage, except for bits that are
     configured to be preserved across free and alloc.
     */
  T *alloc ()
  {
    if (head)
      {
	/* Pop the head of the free list and make it usable again.  */
	T *obj = head;
	head = next (head);
	reinit (obj);
	return obj;
      }
    else
      return anew ();
  }
};

/* Explicitly specialize the interfaces for freelist<tree_node>: we
   want to allocate a TREE_LIST using the usual interface, and ensure
   TREE_CHAIN remains functional.  Alas, we have to duplicate a bit of
   build_tree_list logic in reinit, so this could go out of sync.  */

template <>
inline tree &
freelist<tree_node>::next (tree obj)
{
  return TREE_CHAIN (obj);
}

template <>
inline tree
freelist<tree_node>::anew ()
{
  return build_tree_list (NULL, NULL);
}

template <>
inline void
freelist<tree_node>::poison (tree obj ATTRIBUTE_UNUSED)
{
  int size ATTRIBUTE_UNUSED = sizeof (tree_list);
  tree p ATTRIBUTE_UNUSED = obj;
  tree_base *b ATTRIBUTE_UNUSED = &obj->base;
  tree *q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
  gcc_checking_assert (TREE_CODE (obj) == TREE_LIST);
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
  /* But we still want to use the TREE_CODE and TREE_CHAIN parts.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q)));

#ifdef ENABLE_GC_CHECKING
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (b, sizeof (*b)));
  /* Keep TREE_CHAIN functional.  */
  TREE_SET_CODE (obj, TREE_LIST);
#else
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
#endif
}

template <>
inline void
freelist<tree_node>::reinit (tree obj ATTRIBUTE_UNUSED)
{
  tree_base *b ATTRIBUTE_UNUSED = &obj->base;

#ifdef ENABLE_GC_CHECKING
  gcc_checking_assert (TREE_CODE (obj) == TREE_LIST);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list)));
  memset (obj, 0, sizeof (tree_list));
#endif

  /* Let valgrind know the entire object is available, but
     uninitialized.
     */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list)));

#ifdef ENABLE_GC_CHECKING
  TREE_SET_CODE (obj, TREE_LIST);
#else
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
#endif
}

/* Point to the first object in the TREE_LIST freelist.  */
static GTY((deletable)) tree tree_list_freelist_head;
/* Return the/an actual TREE_LIST freelist.  The freelist object is a
   thin by-value wrapper around the GTY head pointer, so constructing
   one per use costs nothing.  */
static inline freelist<tree_node>
tree_list_freelist ()
{
  return tree_list_freelist_head;
}

/* Point to the first object in the tinst_level freelist.  */
static GTY((deletable)) tinst_level *tinst_level_freelist_head;
/* Return the/an actual tinst_level freelist.  */
static inline freelist<tinst_level>
tinst_level_freelist ()
{
  return tinst_level_freelist_head;
}

/* Point to the first object in the pending_template freelist.  */
static GTY((deletable)) pending_template *pending_template_freelist_head;
/* Return the/an actual pending_template freelist.  */
static inline freelist<pending_template>
pending_template_freelist ()
{
  return pending_template_freelist_head;
}

/* Build the TREE_LIST object out of a split list, store it
   permanently, and return it.  */
tree
tinst_level::to_list ()
{
  gcc_assert (split_list_p ());
  tree ret = tree_list_freelist ().alloc ();
  TREE_PURPOSE (ret) = tldcl;
  TREE_VALUE (ret) = targs;
  /* Storing the TREE_LIST in tldcl and clearing targs switches this
     tinst_level from "split list" to "tree list" representation.  */
  tldcl = ret;
  targs = NULL;
  gcc_assert (tree_list_p ());
  return ret;
}

const unsigned short tinst_level::refcount_infinity;

/* Increment OBJ's refcount unless it is already infinite.  */
static tinst_level *
inc_refcount_use (tinst_level *obj)
{
  if (obj && obj->refcount != tinst_level::refcount_infinity)
    ++obj->refcount;
  return obj;
}

/* Release storage for OBJ and node, if it's a TREE_LIST.  */
void
tinst_level::free (tinst_level *obj)
{
  if (obj->tree_list_p ())
    tree_list_freelist ().free (obj->get_node ());
  tinst_level_freelist ().free (obj);
}

/* Decrement OBJ's refcount if not infinite.
   If it reaches zero, release OBJ's DECL and OBJ, and start over with
   the tinst_level object that used to be referenced by OBJ's NEXT.  */
static void
dec_refcount_use (tinst_level *obj)
{
  /* Iterative (not recursive) release of the NEXT chain, to avoid deep
     recursion on long instantiation chains.  */
  while (obj
	 && obj->refcount != tinst_level::refcount_infinity
	 && !--obj->refcount)
    {
      tinst_level *next = obj->next;
      tinst_level::free (obj);
      obj = next;
    }
}

/* Modify PTR so that it points to OBJ, adjusting the refcounts of OBJ
   and of the former PTR.  Omitting the second argument is equivalent
   to passing (T*)NULL; this is allowed because passing the
   zero-valued integral constant NULL confuses type deduction and/or
   overload resolution.  */
template <typename T>
static void
set_refcount_ptr (T *& ptr, T *obj = NULL)
{
  /* Increment before decrement, so PTR == OBJ is safe.  */
  T *save = ptr;
  ptr = inc_refcount_use (obj);
  dec_refcount_use (save);
}

/* Record D as a template whose instantiation is pending, attaching the
   current instantiation context so diagnostics can reproduce it later.  */
static void
add_pending_template (tree d)
{
  tree ti = (TYPE_P (d)
	     ? CLASSTYPE_TEMPLATE_INFO (d)
	     : DECL_TEMPLATE_INFO (d));
  struct pending_template *pt;
  int level;

  if (TI_PENDING_TEMPLATE_FLAG (ti))
    return;

  /* We are called both from instantiate_decl, where we've already had a
     tinst_level pushed, and instantiate_template, where we haven't.
     Compensate.  */
  gcc_assert (TREE_CODE (d) != TREE_LIST);
  level = !current_tinst_level
	  || current_tinst_level->maybe_get_node () != d;

  if (level)
    push_tinst_level (d);

  pt = pending_template_freelist ().alloc ();
  pt->next = NULL;
  pt->tinst = NULL;
  set_refcount_ptr (pt->tinst, current_tinst_level);
  /* Append to the singly-linked pending_templates queue.  */
  if (last_pending_template)
    last_pending_template->next = pt;
  else
    pending_templates = pt;
  last_pending_template = pt;

  TI_PENDING_TEMPLATE_FLAG (ti) = 1;

  if (level)
    pop_tinst_level ();
}


/* Return a TEMPLATE_ID_EXPR corresponding to the indicated FNS and
   ARGLIST.  Valid choices for FNS are given in the cp-tree.def
   documentation for TEMPLATE_ID_EXPR.
 */

tree
lookup_template_function (tree fns, tree arglist)
{
  tree type;

  if (fns == error_mark_node || arglist == error_mark_node)
    return error_mark_node;

  gcc_assert (!arglist || TREE_CODE (arglist) == TREE_VEC);

  if (!is_overloaded_fn (fns) && !identifier_p (fns))
    {
      error ("%q#D is not a function template", fns);
      return error_mark_node;
    }

  if (BASELINK_P (fns))
    {
      /* For a member function found through a base, wrap only the
	 function set; keep the BASELINK so access information is kept.  */
      BASELINK_FUNCTIONS (fns) = build2 (TEMPLATE_ID_EXPR,
					 unknown_type_node,
					 BASELINK_FUNCTIONS (fns),
					 arglist);
      return fns;
    }

  type = TREE_TYPE (fns);
  if (TREE_CODE (fns) == OVERLOAD || !type)
    type = unknown_type_node;

  return build2 (TEMPLATE_ID_EXPR, type, fns, arglist);
}

/* Within the scope of a template class S<T>, the name S gets bound
   (in build_self_reference) to a TYPE_DECL for the class, not a
   TEMPLATE_DECL.  If DECL is a TYPE_DECL for current_class_type,
   or one of its enclosing classes, and that type is a template,
   return the associated TEMPLATE_DECL.  Otherwise, the original
   DECL is returned.

   Also handle the case when DECL is a TREE_LIST of ambiguous
   injected-class-names from different bases.  */

tree
maybe_get_template_decl_from_type_decl (tree decl)
{
  if (decl == NULL_TREE)
    return decl;

  /* DR 176: A lookup that finds an injected-class-name (10.2
     [class.member.lookup]) can result in an ambiguity in certain cases
     (for example, if it is found in more than one base class). If all of
     the injected-class-names that are found refer to specializations of
     the same class template, and if the name is followed by a
     template-argument-list, the reference refers to the class template
     itself and not a specialization thereof, and is not ambiguous.
     */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree t, tmpl = NULL_TREE;
      /* Resolve each ambiguous candidate; if they all map to the same
	 template, the ambiguity disappears (DR 176).  */
      for (t = decl; t; t = TREE_CHAIN (t))
	{
	  tree elt = maybe_get_template_decl_from_type_decl (TREE_VALUE (t));
	  if (!tmpl)
	    tmpl = elt;
	  else if (tmpl != elt)
	    break;
	}
      if (tmpl && t == NULL_TREE)
	return tmpl;
      else
	return decl;
    }

  return (decl != NULL_TREE
	  && DECL_SELF_REFERENCE_P (decl)
	  && CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
    ? CLASSTYPE_TI_TEMPLATE (TREE_TYPE (decl)) : decl;
}

/* Given an IDENTIFIER_NODE (or type TEMPLATE_DECL) and a chain of
   parameters, find the desired type.

   D1 is the PTYPENAME terminal, and ARGLIST is the list of arguments.

   IN_DECL, if non-NULL, is the template declaration we are trying to
   instantiate.

   If ENTERING_SCOPE is nonzero, we are about to enter the scope of
   the class we are looking up.

   Issue error and warning messages under control of COMPLAIN.

   If the template class is really a local class in a template
   function, then the FUNCTION_CONTEXT is the function in which it is
   being instantiated.

   ??? Note that this function is currently called *twice* for each
   template-id: the first time from the parser, while creating the
   incomplete type (finish_template_type), and the second time during
   the real instantiation (instantiate_template_class).  This is
   surely something that we want to avoid.  It also causes some
   problems with argument coercion (see convert_nontype_argument
   for more information on this).
 */

static tree
lookup_template_class_1 (tree d1, tree arglist, tree in_decl, tree context,
			 int entering_scope, tsubst_flags_t complain)
{
  tree templ = NULL_TREE, parmlist;
  tree t;
  spec_entry **slot;
  spec_entry *entry;
  spec_entry elt;
  hashval_t hash;

  /* Step 1: resolve D1 to a TEMPLATE_DECL, accepting the several
     forms the parser may hand us (identifier, TYPE_DECL, type,
     TEMPLATE_DECL, template template parameter).  */
  if (identifier_p (d1))
    {
      tree value = innermost_non_namespace_value (d1);
      if (value && DECL_TEMPLATE_TEMPLATE_PARM_P (value))
	templ = value;
      else
	{
	  if (context)
	    push_decl_namespace (context);
	  templ = lookup_name (d1);
	  templ = maybe_get_template_decl_from_type_decl (templ);
	  if (context)
	    pop_decl_namespace ();
	}
      if (templ)
	context = DECL_CONTEXT (templ);
    }
  else if (TREE_CODE (d1) == TYPE_DECL && MAYBE_CLASS_TYPE_P (TREE_TYPE (d1)))
    {
      tree type = TREE_TYPE (d1);

      /* If we are declaring a constructor, say A<T>::A<T>, we will get
	 an implicit typename for the second A.  Deal with it.  */
      if (TREE_CODE (type) == TYPENAME_TYPE && TREE_TYPE (type))
	type = TREE_TYPE (type);

      if (CLASSTYPE_TEMPLATE_INFO (type))
	{
	  templ = CLASSTYPE_TI_TEMPLATE (type);
	  d1 = DECL_NAME (templ);
	}
    }
  else if (TREE_CODE (d1) == ENUMERAL_TYPE
	   || (TYPE_P (d1) && MAYBE_CLASS_TYPE_P (d1)))
    {
      templ = TYPE_TI_TEMPLATE (d1);
      d1 = DECL_NAME (templ);
    }
  else if (DECL_TYPE_TEMPLATE_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
      context = DECL_CONTEXT (templ);
    }
  else if (DECL_TEMPLATE_TEMPLATE_PARM_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
    }

  /* Issue an error message if we didn't find a template.  */
  if (! templ)
    {
      if (complain & tf_error)
	error ("%qT is not a template", d1);
      return error_mark_node;
    }

  if (TREE_CODE (templ) != TEMPLATE_DECL
	 /* Make sure it's a user visible template, if it was named by
	    the user.  */
      || ((complain & tf_user) && !DECL_TEMPLATE_PARM_P (templ)
	  && !PRIMARY_TEMPLATE_P (templ)))
    {
      if (complain & tf_error)
	{
	  error ("non-template type %qT used as a template", d1);
	  if (in_decl)
	    error ("for template declaration %q+D", in_decl);
	}
      return error_mark_node;
    }

  complain &= ~tf_user;

  /* An alias that just changes the name of a template is equivalent to
     the other template, so if any of the arguments are pack expansions,
     strip the alias to avoid problems with a pack expansion passed to a
     non-pack alias template parameter (DR 1430).  */
  if (pack_expansion_args_count (INNERMOST_TEMPLATE_ARGS (arglist)))
    templ = get_underlying_template (templ);

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      /* Template template parameter: bind the arguments to the parameter
	 rather than instantiating a real specialization.  */
      tree parm;
      tree arglist2 = coerce_template_args_for_ttp (templ, arglist, complain);
      if (arglist2 == error_mark_node
	  || (!uses_template_parms (arglist2)
	      && check_instantiated_args (templ, arglist2, complain)))
	return error_mark_node;

      parm = bind_template_template_parm (TREE_TYPE (templ), arglist2);
      return parm;
    }
  else
    {
      tree template_type = TREE_TYPE (templ);
      tree gen_tmpl;
      tree type_decl;
      tree found = NULL_TREE;
      int arg_depth;
      int parm_depth;
      int is_dependent_type;
      int use_partial_inst_tmpl = false;

      if (template_type == error_mark_node)
	/* An error occurred while building the template TEMPL, and a
	   diagnostic has most certainly been emitted for that
	   already.  Let's propagate that error.  */
	return error_mark_node;

      gen_tmpl = most_general_template (templ);
      parmlist = DECL_TEMPLATE_PARMS (gen_tmpl);
      parm_depth = TMPL_PARMS_DEPTH (parmlist);
      arg_depth = TMPL_ARGS_DEPTH (arglist);

      if (arg_depth == 1 && parm_depth > 1)
	{
	  /* We've been given an incomplete set of template arguments.
	     For example, given:

	       template <class T> struct S1 {
		 template <class U> struct S2 {};
		 template <class U> struct S2<U*> {};
	       };

	     we will be called with an ARGLIST of `U*', but the
	     TEMPLATE will be `template <class T> template
	     <class U> struct S1<T>::S2'.  We must fill in the missing
	     arguments.  */
	  tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (templ));
	  arglist = add_outermost_template_args (TI_ARGS (ti), arglist);
	  arg_depth = TMPL_ARGS_DEPTH (arglist);
	}

      /* Now we should have enough arguments.  */
      gcc_assert (parm_depth == arg_depth);

      /* From here on, we're only interested in the most general
	 template.  */

      /* Calculate the BOUND_ARGS.  These will be the args that are
	 actually tsubst'd into the definition to create the
	 instantiation.  */
      arglist = coerce_innermost_template_parms (parmlist, arglist, gen_tmpl,
						 complain,
						 /*require_all_args=*/true,
						 /*use_default_args=*/true);

      if (arglist == error_mark_node)
	/* We were unable to bind the arguments.  */
	return error_mark_node;

      /* In the scope of a template class, explicit references to the
	 template class refer to the type of the template, not any
	 instantiation of it.  For example, in:

	   template <class T> class C { void f(C<T>); }

	 the `C<T>' is just the same as `C'.  Outside of the
	 class, however, such a reference is an instantiation.  */
      if (entering_scope
	  || !PRIMARY_TEMPLATE_P (gen_tmpl)
	  || currently_open_class (template_type))
	{
	  tree tinfo = TYPE_TEMPLATE_INFO (template_type);

	  if (tinfo && comp_template_args (TI_ARGS (tinfo), arglist))
	    return template_type;
	}

      /* If we already have this specialization, return it.  */
      elt.tmpl = gen_tmpl;
      elt.args = arglist;
      elt.spec = NULL_TREE;
      hash = spec_hasher::hash (&elt);
      entry = type_specializations->find_with_hash (&elt, hash);

      if (entry)
	return entry->spec;

      /* If the template's constraints are not satisfied,
	 then we cannot form a valid type.

	 Note that the check is deferred until after the hash
	 lookup.  This prevents redundant checks on previously
	 instantiated specializations.  */
      if (flag_concepts && !constraints_satisfied_p (gen_tmpl, arglist))
	{
	  if (complain & tf_error)
	    {
	      error ("template constraint failure");
	      diagnose_constraints (input_location, gen_tmpl, arglist);
	    }
	  return error_mark_node;
	}

      is_dependent_type = uses_template_parms (arglist);

      /* If the deduced arguments are invalid, then the binding
	 failed.  */
      if (!is_dependent_type
	  && check_instantiated_args (gen_tmpl,
				      INNERMOST_TEMPLATE_ARGS (arglist),
				      complain))
	return error_mark_node;

      if (!is_dependent_type
	  && !PRIMARY_TEMPLATE_P (gen_tmpl)
	  && !LAMBDA_TYPE_P (TREE_TYPE (gen_tmpl))
	  && TREE_CODE (CP_DECL_CONTEXT (gen_tmpl)) == NAMESPACE_DECL)
	{
	  /* Non-dependent reference to a non-primary namespace-scope
	     template: just look up the underlying tag.  */
	  found = xref_tag_from_type (TREE_TYPE (gen_tmpl),
				      DECL_NAME (gen_tmpl),
				      /*tag_scope=*/ts_global);
	  return found;
	}

      context = tsubst (DECL_CONTEXT (gen_tmpl), arglist,
			complain, in_decl);
      if (context == error_mark_node)
	return error_mark_node;

      if (!context)
	context = global_namespace;

      /* Create the type.  */
      if (DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* The user referred to a specialization of an alias
	     template represented by GEN_TMPL.

	     [temp.alias]/2 says:

		 When a template-id refers to the specialization of an
		 alias template, it is equivalent to the associated
		 type obtained by substitution of its
		 template-arguments for the template-parameters in the
		 type-id of the alias template.  */
	  t = tsubst (TREE_TYPE (gen_tmpl), arglist, complain, in_decl);
	  /* Note that the call above (by indirectly calling
	     register_specialization in tsubst_decl) registers the
	     TYPE_DECL representing the specialization of the alias
	     template.  So next time someone substitutes ARGLIST for
	     the template parms into the alias template (GEN_TMPL),
	     she'll get that TYPE_DECL back.  */
	  if (t == error_mark_node)
	    return t;
	}
      else if (TREE_CODE (template_type) == ENUMERAL_TYPE)
	{
	  if (!is_dependent_type)
	    {
	      set_current_access_from_decl (TYPE_NAME (template_type));
	      t = start_enum (TYPE_IDENTIFIER (template_type), NULL_TREE,
			      tsubst (ENUM_UNDERLYING_TYPE (template_type),
				      arglist, complain, in_decl),
			      tsubst_attributes (TYPE_ATTRIBUTES
						 (template_type),
						 arglist, complain, in_decl),
			      SCOPED_ENUM_P (template_type), NULL);

	      if (t == error_mark_node)
		return t;
	    }
	  else
	    {
	      /* We don't want to call start_enum for this type, since
		 the values for the enumeration constants may involve
		 template parameters.  And, no one should be interested
		 in the enumeration constants for such a type.  */
	      t = cxx_make_type (ENUMERAL_TYPE);
	      SET_SCOPED_ENUM_P (t, SCOPED_ENUM_P (template_type));
	    }
	  SET_OPAQUE_ENUM_P (t, OPAQUE_ENUM_P (template_type));
	  ENUM_FIXED_UNDERLYING_TYPE_P (t)
	    = ENUM_FIXED_UNDERLYING_TYPE_P (template_type);
	}
      else if (CLASS_TYPE_P (template_type))
	{
	  /* Lambda closures are regenerated in tsubst_lambda_expr, not
	     instantiated here.  */
	  gcc_assert (!LAMBDA_TYPE_P (template_type));

	  t = make_class_type (TREE_CODE (template_type));
	  CLASSTYPE_DECLARED_CLASS (t)
	    = CLASSTYPE_DECLARED_CLASS (template_type);
	  SET_CLASSTYPE_IMPLICIT_INSTANTIATION (t);

	  /* A local class.  Make sure the decl gets registered properly.  */
	  if (context == current_function_decl)
	    if (pushtag (DECL_NAME (gen_tmpl), t, /*tag_scope=*/ts_current)
		== error_mark_node)
	      return error_mark_node;

	  if (comp_template_args (CLASSTYPE_TI_ARGS (template_type), arglist))
	    /* This instantiation is another name for the primary
	       template type.  Set the TYPE_CANONICAL field
	       appropriately.  */
	    TYPE_CANONICAL (t) = template_type;
	  else if (any_template_arguments_need_structural_equality_p (arglist))
	    /* Some of the template arguments require structural
	       equality testing, so this template class requires
	       structural equality testing.  */
	    SET_TYPE_STRUCTURAL_EQUALITY (t);
	}
      else
	gcc_unreachable ();

      /* If we called start_enum or pushtag above, this information
	 will already be set up.  */
      if (!TYPE_NAME (t))
	{
	  TYPE_CONTEXT (t) = FROB_CONTEXT (context);

	  type_decl = create_implicit_typedef (DECL_NAME (gen_tmpl), t);
	  DECL_CONTEXT (type_decl) = TYPE_CONTEXT (t);
	  DECL_SOURCE_LOCATION (type_decl)
	    = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (template_type));
	}
      else
	type_decl = TYPE_NAME (t);

      if (CLASS_TYPE_P (template_type))
	{
	  /* Propagate access and visibility from the primary template.  */
	  TREE_PRIVATE (type_decl)
	    = TREE_PRIVATE (TYPE_MAIN_DECL (template_type));
	  TREE_PROTECTED (type_decl)
	    = TREE_PROTECTED (TYPE_MAIN_DECL (template_type));
	  if (CLASSTYPE_VISIBILITY_SPECIFIED (template_type))
	    {
	      DECL_VISIBILITY_SPECIFIED (type_decl) = 1;
	      DECL_VISIBILITY (type_decl)
		= CLASSTYPE_VISIBILITY (template_type);
	    }
	}

      if (OVERLOAD_TYPE_P (t)
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* Copy the abi_tag and may_alias attributes from the template
	     to each of its specializations.  */
	  static const char *tags[] = {"abi_tag", "may_alias"};

	  for (unsigned ix = 0; ix != 2; ix++)
	    {
	      tree attributes
		= lookup_attribute (tags[ix], TYPE_ATTRIBUTES (template_type));

	      if (attributes)
		TYPE_ATTRIBUTES (t)
		  = tree_cons (TREE_PURPOSE (attributes),
			       TREE_VALUE (attributes),
			       TYPE_ATTRIBUTES (t));
	    }
	}

      /* Let's consider the explicit specialization of a member
	 of a class template specialization that is implicitly
	 instantiated, e.g.:
	     template<class T>
	     struct S
	     {
	       template<class U> struct M {}; //#0
	     };

	     template<>
	     template<>
	     struct S<int>::M<char> //#1
	     {
	       int i;
	     };
	[temp.expl.spec]/4 says this is valid.

	In this case, when we write:
	S<int>::M<char> m;

	M is instantiated from the CLASSTYPE_TI_TEMPLATE of #1, not from
	the one of #0.

	When we encounter #1, we want to store the partial instantiation
	of M (template<class T> S<int>::M<T>) in its
	CLASSTYPE_TI_TEMPLATE.

	For all cases other than this "explicit specialization of member
	of a class template", we just want to store the most general
	template into the CLASSTYPE_TI_TEMPLATE of M.

	This case of "explicit specialization of member of a class
	template" only happens when:
	1/ the enclosing class is an instantiation of, and therefore not
	the same as, the context of the most general template, and
	2/ we aren't looking at the partial instantiation itself, i.e.
	the innermost arguments are not the same as the innermost parms of
	the most general template.

	So it's only when 1/ and 2/ happens that we want to use the
	partial instantiation of the member template in lieu of its most
	general template.  */

      if (PRIMARY_TEMPLATE_P (gen_tmpl)
	  && TMPL_ARGS_HAVE_MULTIPLE_LEVELS (arglist)
	  /* the enclosing class must be an instantiation...  */
	  && CLASS_TYPE_P (context)
	  && !same_type_p (context, DECL_CONTEXT (gen_tmpl)))
	{
	  /* Temporarily drop the innermost argument level so we
	     substitute only the enclosing-class arguments.  */
	  TREE_VEC_LENGTH (arglist)--;
	  ++processing_template_decl;
	  tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (gen_tmpl));
	  tree partial_inst_args =
	    tsubst (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)),
		    arglist, complain, NULL_TREE);
	  --processing_template_decl;
	  TREE_VEC_LENGTH (arglist)++;
	  if (partial_inst_args == error_mark_node)
	    return error_mark_node;
	  use_partial_inst_tmpl =
	    /*...and we must not be looking at the partial instantiation
	     itself.  */
	    !comp_template_args (INNERMOST_TEMPLATE_ARGS (arglist),
				 partial_inst_args);
	}

      if (!use_partial_inst_tmpl)
	/* This case is easy; there are no member templates involved.  */
	found = gen_tmpl;
      else
	{
	  /* This is a full instantiation of a member template.  Find
	     the partial instantiation of which this is an instance.  */

	  /* Temporarily reduce by one the number of levels in the ARGLIST
	     so as to avoid comparing the last set of arguments.  */
	  TREE_VEC_LENGTH (arglist)--;
	  found = tsubst (gen_tmpl, arglist, complain, NULL_TREE);
	  TREE_VEC_LENGTH (arglist)++;
	  /* FOUND is either a proper class type, or an alias
	     template specialization.  In the latter case, it's a
	     TYPE_DECL, resulting from the substituting of arguments
	     for parameters in the TYPE_DECL of the alias template
	     done earlier.  So be careful while getting the template
	     of FOUND.  */
	  found = (TREE_CODE (found) == TEMPLATE_DECL
		   ? found
		   : (TREE_CODE (found) == TYPE_DECL
		      ? DECL_TI_TEMPLATE (found)
		      : CLASSTYPE_TI_TEMPLATE (found)));
	}

      // Build template info for the new specialization.
      SET_TYPE_TEMPLATE_INFO (t, build_template_info (found, arglist));

      elt.spec = t;
      slot = type_specializations->find_slot_with_hash (&elt, hash, INSERT);
      entry = ggc_alloc<spec_entry> ();
      *entry = elt;
      *slot = entry;

      /* Note this use of the partial instantiation so we can check it
	 later in maybe_process_partial_specialization.  */
      DECL_TEMPLATE_INSTANTIATIONS (found)
	= tree_cons (arglist, t,
		     DECL_TEMPLATE_INSTANTIATIONS (found));

      if (TREE_CODE (template_type) == ENUMERAL_TYPE
	  && !is_dependent_type
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	/* Now that the type has been registered on the instantiations
	   list, we set up the enumerators.  Because the enumeration
	   constants may involve the enumeration type itself, we make
	   sure to register the type first, and then create the
	   constants.  That way, doing tsubst_expr for the enumeration
	   constants won't result in recursive calls here; we'll find
	   the instantiation and exit above.  */
	tsubst_enum (template_type, t, arglist);

      if (CLASS_TYPE_P (template_type) && is_dependent_type)
	/* If the type makes use of template parameters, the
	   code that generates debugging information will crash.  */
	DECL_IGNORED_P (TYPE_MAIN_DECL (t)) = 1;

      /* Possibly limit visibility based on template args.  */
      TREE_PUBLIC (type_decl) = 1;
      determine_visibility (type_decl);

      inherit_targ_abi_tags (t);

      return t;
    }
}

/* Wrapper for lookup_template_class_1.
*/

/* Thin timing wrapper: performs the lookup via lookup_template_class_1
   with the TV_TEMPLATE_INST timevar active so instantiation lookups are
   accounted to template-instantiation time.  */

tree
lookup_template_class (tree d1, tree arglist, tree in_decl, tree context,
		       int entering_scope, tsubst_flags_t complain)
{
  tree ret;
  timevar_push (TV_TEMPLATE_INST);
  ret = lookup_template_class_1 (d1, arglist, in_decl, context,
				 entering_scope, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return ret;
}

/* Return a TEMPLATE_ID_EXPR for the given variable template and ARGLIST.  */

tree
lookup_template_variable (tree templ, tree arglist)
{
  /* The type of the expression is NULL_TREE since the template-id could refer
     to an explicit or partial specialization.  */
  tree type = NULL_TREE;
  if (flag_concepts && variable_concept_p (templ))
    /* Except that concepts are always bool.  */
    type = boolean_type_node;
  return build2 (TEMPLATE_ID_EXPR, type, templ, arglist);
}

/* Instantiate a variable declaration from a TEMPLATE_ID_EXPR for use.
   VAR is the TEMPLATE_ID_EXPR; COMPLAIN controls diagnostics.  Returns
   the instantiated VAR_DECL, VAR itself (dependent concept case), or
   error_mark_node.  */

tree
finish_template_variable (tree var, tsubst_flags_t complain)
{
  tree templ = TREE_OPERAND (var, 0);
  tree arglist = TREE_OPERAND (var, 1);

  /* We never want to return a VAR_DECL for a variable concept, since they
     aren't instantiated.  In a template, leave the TEMPLATE_ID_EXPR alone.  */
  bool concept_p = flag_concepts && variable_concept_p (templ);
  if (concept_p && processing_template_decl)
    return var;

  /* Prepend the enclosing arguments recorded on the template, then coerce
     the full list against the most general template's parameters.  */
  tree tmpl_args = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (templ));
  arglist = add_outermost_template_args (tmpl_args, arglist);

  templ = most_general_template (templ);
  tree parms = DECL_TEMPLATE_PARMS (templ);
  arglist = coerce_innermost_template_parms (parms, arglist, templ, complain,
					     /*req_all*/true,
					     /*use_default*/true);

  if (flag_concepts && !constraints_satisfied_p (templ, arglist))
    {
      if (complain & tf_error)
	{
	  error ("use of invalid variable template %qE", var);
	  diagnose_constraints (location_of (var), templ, arglist);
	}
      return error_mark_node;
    }

  /* If a template-id refers to a specialization of a variable concept,
     then the expression is true if and only if the concept's constraints
     are satisfied by the given template arguments.

     NOTE: This is an extension of Concepts Lite TS that
     allows constraints to be used in expressions.  */
  if (concept_p)
    {
      tree decl = DECL_TEMPLATE_RESULT (templ);
      return evaluate_variable_concept (decl, arglist);
    }

  return instantiate_template (templ, arglist, complain);
}

/* Construct a TEMPLATE_ID_EXPR for the given variable template TEMPL having
   TARGS template args, and instantiate it if it's not dependent.  */

tree
lookup_and_finish_template_variable (tree templ, tree targs,
				     tsubst_flags_t complain)
{
  templ = lookup_template_variable (templ, targs);
  if (!any_dependent_template_arguments_p (targs))
    {
      templ = finish_template_variable (templ, complain);
      mark_used (templ);
    }

  return convert_from_reference (templ);
}

/* Callback state threaded through walk_tree by for_each_template_parm.  */

struct pair_fn_data
{
  tree_fn_t fn;		/* Callback applied to each template parameter.  */
  tree_fn_t any_fn;	/* Optional callback tried first on every node.  */
  void *data;		/* Opaque client data for the callbacks.  */

  /* True when we should also visit template parameters that
     occur in non-deduced contexts.  */
  bool include_nondeduced_p;
  hash_set<tree> *visited;	/* Nodes already walked.  */
};

/* Called from for_each_template_parm via walk_tree.  Returns a non-NULL
   tree to terminate the walk (the found parameter, or error_mark_node
   for "definitely dependent" expression nodes), NULL_TREE to continue.  */

static tree
for_each_template_parm_r (tree *tp, int *walk_subtrees, void *d)
{
  tree t = *tp;
  struct pair_fn_data *pfd = (struct pair_fn_data *) d;
  tree_fn_t fn = pfd->fn;
  void *data = pfd->data;
  tree result = NULL_TREE;

  /* Recurse into NODE; on a hit, stash the result and bail out.  */
#define WALK_SUBTREE(NODE)						\
  do									\
    {									\
      result = for_each_template_parm (NODE, fn, data, pfd->visited,	\
				       pfd->include_nondeduced_p,	\
				       pfd->any_fn);			\
      if (result) goto out;						\
    }									\
  while (0)

  if (pfd->any_fn && (*pfd->any_fn)(t, data))
    return t;

  if (TYPE_P (t)
      && (pfd->include_nondeduced_p || TREE_CODE (t) != TYPENAME_TYPE))
    WALK_SUBTREE (TYPE_CONTEXT (t));

  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	break;
      /* Fall through.  */

    case UNION_TYPE:
    case ENUMERAL_TYPE:
      if (!TYPE_TEMPLATE_INFO (t))
	*walk_subtrees = 0;
      else
	WALK_SUBTREE (TYPE_TI_ARGS (t));
      break;

    case INTEGER_TYPE:
      WALK_SUBTREE (TYPE_MIN_VALUE (t));
      WALK_SUBTREE (TYPE_MAX_VALUE (t));
      break;

    case METHOD_TYPE:
      /* Since we're not going to walk subtrees, we have to do this
	 explicitly here.  */
      WALK_SUBTREE (TYPE_METHOD_BASETYPE (t));
      /* Fall through.  */

    case FUNCTION_TYPE:
      /* Check the return type.  */
      WALK_SUBTREE (TREE_TYPE (t));

      /* Check the parameter types.  Since default arguments are not
	 instantiated until they are needed, the TYPE_ARG_TYPES may contain
	 expressions that involve template parameters.  But, no-one should
	 be looking at them yet.  And, once they're instantiated, they don't
	 contain template parameters, so there's no point in looking at them
	 then, either.  */
      {
	tree parm;

	for (parm = TYPE_ARG_TYPES (t); parm; parm = TREE_CHAIN (parm))
	  WALK_SUBTREE (TREE_VALUE (parm));

	/* Since we've already handled the TYPE_ARG_TYPES, we don't want
	   walk_tree walking into them itself.  */
	*walk_subtrees = 0;
      }

      if (flag_noexcept_type)
	{
	  /* A noexcept-specifier may contain template parameters.  */
	  tree spec = TYPE_RAISES_EXCEPTIONS (t);
	  if (spec)
	    WALK_SUBTREE (TREE_PURPOSE (spec));
	}
      break;

    case TYPEOF_TYPE:
    case UNDERLYING_TYPE:
      if (pfd->include_nondeduced_p
	  && for_each_template_parm (TYPE_VALUES_RAW (t), fn, data,
				     pfd->visited,
				     pfd->include_nondeduced_p,
				     pfd->any_fn))
	return error_mark_node;
      break;

    case FUNCTION_DECL:
    case VAR_DECL:
      if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
	WALK_SUBTREE (DECL_TI_ARGS (t));
      /* Fall through.  */

    case PARM_DECL:
    case CONST_DECL:
      if (TREE_CODE (t) == CONST_DECL && DECL_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (DECL_INITIAL (t));
      if (DECL_CONTEXT (t)
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (DECL_CONTEXT (t));
      break;

    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Record template parameters such as `T' inside `TT<T>'.  */
      WALK_SUBTREE (TYPE_TI_ARGS (t));
      /* Fall through.  */

    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
      /* An actual template parameter: report it via FN, or, with no FN,
	 any parameter at all terminates the walk.  */
      if (fn && (*fn)(t, data))
	return t;
      else if (!fn)
	return t;
      break;

    case TEMPLATE_DECL:
      /* A template template parameter is encountered.  */
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (TREE_TYPE (t));

      /* Already substituted template template parameter */
      *walk_subtrees = 0;
      break;

    case TYPENAME_TYPE:
      /* A template-id in a TYPENAME_TYPE might be a deduced context after
	 partial instantiation.  */
      WALK_SUBTREE (TYPENAME_TYPE_FULLNAME (t));
      break;

    case CONSTRUCTOR:
      if (TREE_TYPE (t) && TYPE_PTRMEMFUNC_P (TREE_TYPE (t))
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (t)));
      break;

    case INDIRECT_REF:
    case COMPONENT_REF:
      /* If there's no type, then this thing must be some expression
	 involving template parameters.  */
      if (!fn && !TREE_TYPE (t))
	return error_mark_node;
      break;

    case MODOP_EXPR:
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case ARROW_EXPR:
    case DOTSTAR_EXPR:
    case TYPEID_EXPR:
    case PSEUDO_DTOR_EXPR:
      /* These expression codes are treated as dependent when no FN is
	 supplied.  */
      if (!fn)
	return error_mark_node;
      break;

    default:
      break;
    }

#undef WALK_SUBTREE

  /* We didn't find any template parameters we liked.  */
 out:
  return result;
}

/* For each TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM,
   BOUND_TEMPLATE_TEMPLATE_PARM or TEMPLATE_PARM_INDEX in T,
   call FN with the parameter and the DATA.
   If FN returns nonzero, the iteration is terminated, and
   for_each_template_parm returns 1.  Otherwise, the iteration
   continues.  If FN never returns a nonzero value, the value
   returned by for_each_template_parm is 0.  If FN is NULL, it is
   considered to be the function which always returns 1.

   If INCLUDE_NONDEDUCED_P, then this routine will also visit template
   parameters that occur in non-deduced contexts.  When false, only
   visits those template parameters that can be deduced.  */

static tree
for_each_template_parm (tree t, tree_fn_t fn, void* data,
			hash_set<tree> *visited,
			bool include_nondeduced_p,
			tree_fn_t any_fn)
{
  struct pair_fn_data pfd;
  tree result;

  /* Set up.  */
  pfd.fn = fn;
  pfd.any_fn = any_fn;
  pfd.data = data;
  pfd.include_nondeduced_p = include_nondeduced_p;

  /* Walk the tree.  (Conceptually, we would like to walk without
     duplicates, but for_each_template_parm_r recursively calls
     for_each_template_parm, so we would need to reorganize a fair
     bit to use walk_tree_without_duplicates, so we keep our own
     visited list.)  */
  if (visited)
    pfd.visited = visited;
  else
    pfd.visited = new hash_set<tree>;
  result = cp_walk_tree (&t, for_each_template_parm_r, &pfd,
			 pfd.visited);

  /* Clean up.  Only free the visited set if we allocated it here.  */
  if (!visited)
    {
      delete pfd.visited;
      pfd.visited = 0;
    }

  return result;
}

/* Returns true if T depends on any template parameter.  */

int
uses_template_parms (tree t)
{
  if (t == NULL_TREE)
    return false;

  bool dependent_p;
  int saved_processing_template_decl;

  /* The dependency predicates below assume a template context; force
     processing_template_decl on for the duration of the check.  */
  saved_processing_template_decl = processing_template_decl;
  if (!saved_processing_template_decl)
    processing_template_decl = 1;

  if (TYPE_P (t))
    dependent_p = dependent_type_p (t);
  else if (TREE_CODE (t) == TREE_VEC)
    dependent_p = any_dependent_template_arguments_p (t);
  else if (TREE_CODE (t) == TREE_LIST)
    dependent_p = (uses_template_parms (TREE_VALUE (t))
		   || uses_template_parms (TREE_CHAIN (t)));
  else if (TREE_CODE (t) == TYPE_DECL)
    dependent_p = dependent_type_p (TREE_TYPE (t));
  else if (DECL_P (t)
	   || EXPR_P (t)
	   || TREE_CODE (t) == TEMPLATE_PARM_INDEX
	   || TREE_CODE (t) == OVERLOAD
	   || BASELINK_P (t)
	   || identifier_p (t)
	   || TREE_CODE (t) == TRAIT_EXPR
	   || TREE_CODE (t) == CONSTRUCTOR
	   || CONSTANT_CLASS_P (t))
    dependent_p = (type_dependent_expression_p (t)
		   || value_dependent_expression_p (t));
  else
    {
      gcc_assert (t == error_mark_node);
      dependent_p = false;
    }

  processing_template_decl = saved_processing_template_decl;

  return dependent_p;
}

/* Returns true iff current_function_decl is an incompletely instantiated
   template.  Useful instead of processing_template_decl because the
   latter is set to 0 during instantiate_non_dependent_expr.
*/

bool
in_template_function (void)
{
  tree fn = current_function_decl;
  bool ret;
  /* Bump processing_template_decl so the dependency check below behaves
     as in a template context, then restore it.  */
  ++processing_template_decl;
  ret = (fn && DECL_LANG_SPECIFIC (fn)
	 && DECL_TEMPLATE_INFO (fn)
	 && any_dependent_template_arguments_p (DECL_TI_ARGS (fn)));
  --processing_template_decl;
  return ret;
}

/* Returns true if T depends on any template parameter with level LEVEL.  */

bool
uses_template_parms_level (tree t, int level)
{
  return for_each_template_parm (t, template_parm_this_level_p, &level, NULL,
				 /*include_nondeduced_p=*/true);
}

/* Returns true if the signature of DECL depends on any template parameter
   from its enclosing class.  Checks the declaration's type, its innermost
   template parameters (for a primary template), and its associated
   constraints, in that order.  */

bool
uses_outer_template_parms (tree decl)
{
  int depth = template_class_depth (CP_DECL_CONTEXT (decl));
  if (depth == 0)
    return false;
  if (for_each_template_parm (TREE_TYPE (decl), template_parm_outer_level,
			      &depth, NULL, /*include_nondeduced_p=*/true))
    return true;
  if (PRIMARY_TEMPLATE_P (decl)
      && for_each_template_parm (INNERMOST_TEMPLATE_PARMS
				 (DECL_TEMPLATE_PARMS (decl)),
				 template_parm_outer_level,
				 &depth, NULL,
				 /*include_nondeduced_p=*/true))
    return true;
  tree ci = get_constraints (decl);
  if (ci)
    ci = CI_ASSOCIATED_CONSTRAINTS (ci);
  if (ci && for_each_template_parm (ci, template_parm_outer_level,
				    &depth, NULL, /*nondeduced*/true))
    return true;
  return false;
}

/* Returns TRUE iff INST is an instantiation we don't need to do in an
   ill-formed translation unit, i.e. a variable or function that isn't
   usable in a constant expression.  */

static inline bool
neglectable_inst_p (tree d)
{
  return (d && DECL_P (d)
	  && !undeduced_auto_decl (d)
	  && !(TREE_CODE (d) == FUNCTION_DECL
	       ? DECL_DECLARED_CONSTEXPR_P (d)
	       : decl_maybe_constant_var_p (d)))
	  ;
}

/* Returns TRUE iff we should refuse to instantiate DECL because it's
   neglectable and instantiated from within an erroneous instantiation.  */

static bool
limit_bad_template_recursion (tree decl)
{
  struct tinst_level *lev = current_tinst_level;
  int errs = errorcount + sorrycount;
  if (lev == NULL || errs == 0 || !neglectable_inst_p (decl))
    return false;

  /* Find the nearest enclosing neglectable instantiation, if any.  */
  for (; lev; lev = lev->next)
    if (neglectable_inst_p (lev->maybe_get_node ()))
      break;

  /* Refuse only if errors were emitted since that level was pushed.  */
  return (lev && errs > lev->errors);
}

/* Current depth of the template instantiation stack.  */
static int tinst_depth;
extern int max_tinst_depth;
/* Deepest instantiation depth seen (for GATHER_STATISTICS).  */
int depth_reached;

static GTY(()) struct tinst_level *last_error_tinst_level;

/* We're starting to instantiate D; record the template instantiation context
   at LOC for diagnostics and to restore it later.  */

static bool
push_tinst_level_loc (tree tldcl, tree targs, location_t loc)
{
  struct tinst_level *new_level;

  if (tinst_depth >= max_tinst_depth)
    {
      /* Tell error.c not to try to instantiate any templates.  */
      at_eof = 2;
      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " (use -ftemplate-depth= to increase the maximum)",
		   max_tinst_depth);
      return false;
    }

  /* If the current instantiation caused problems, don't let it instantiate
     anything else.  Do allow deduction substitution and decls usable in
     constant expressions.  */
  if (!targs && limit_bad_template_recursion (tldcl))
    return false;

  /* When not -quiet, dump template instantiations other than functions, since
     announce_function will take care of those.  */
  if (!quiet_flag && !targs
      && TREE_CODE (tldcl) != TREE_LIST
      && TREE_CODE (tldcl) != FUNCTION_DECL)
    fprintf (stderr, " %s", decl_as_string (tldcl, TFF_DECL_SPECIFIERS));

  new_level = tinst_level_freelist ().alloc ();
  new_level->tldcl = tldcl;
  new_level->targs = targs;
  new_level->locus = loc;
  new_level->errors = errorcount + sorrycount;
  new_level->next = NULL;
  new_level->refcount = 0;
  /* Link the new level onto the refcounted stack and make it current.  */
  set_refcount_ptr (new_level->next, current_tinst_level);
  set_refcount_ptr (current_tinst_level, new_level);

  ++tinst_depth;
  if (GATHER_STATISTICS && (tinst_depth > depth_reached))
    depth_reached = tinst_depth;

  return true;
}

/* We're starting substitution of TMPL<ARGS>; record the template
   substitution context for diagnostics and to restore it later.  */

static bool
push_tinst_level (tree tmpl, tree args)
{
  return push_tinst_level_loc (tmpl, args, input_location);
}

/* We're starting to instantiate D; record INPUT_LOCATION and the
   template instantiation context for diagnostics and to restore it
   later.  */

bool
push_tinst_level (tree d)
{
  return push_tinst_level_loc (d, input_location);
}

/* Likewise, but record LOC as the program location.  */

bool
push_tinst_level_loc (tree d, location_t loc)
{
  gcc_assert (TREE_CODE (d) != TREE_LIST);
  return push_tinst_level_loc (d, NULL, loc);
}

/* We're done instantiating this template; return to the instantiation
   context.  */

void
pop_tinst_level (void)
{
  /* Restore the filename and line number stashed away when we started
     this instantiation.  */
  input_location = current_tinst_level->locus;
  set_refcount_ptr (current_tinst_level, current_tinst_level->next);
  --tinst_depth;
}

/* We're instantiating a deferred template; restore the template
   instantiation context in which the instantiation was requested, which
   is one step out from LEVEL.  Return the corresponding DECL or TYPE.
*/

static tree
reopen_tinst_level (struct tinst_level *level)
{
  struct tinst_level *t;

  /* Recompute the depth from the length of the chain rooted at LEVEL.  */
  tinst_depth = 0;
  for (t = level; t; t = t->next)
    ++tinst_depth;

  /* Make LEVEL current, then pop once so the context is the level at
     which the deferred instantiation was requested.  */
  set_refcount_ptr (current_tinst_level, level);
  pop_tinst_level ();
  if (current_tinst_level)
    current_tinst_level->errors = errorcount+sorrycount;
  return level->maybe_get_node ();
}

/* Returns the TINST_LEVEL which gives the original instantiation
   context.  */

struct tinst_level *
outermost_tinst_level (void)
{
  struct tinst_level *level = current_tinst_level;
  if (level)
    while (level->next)
      level = level->next;
  return level;
}

/* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL.  ARGS is the
   vector of template arguments, as for tsubst.

   Returns an appropriate tsubst'd friend declaration.  */

static tree
tsubst_friend_function (tree decl, tree args)
{
  tree new_friend;

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_TEMPLATE_INSTANTIATION (decl)
      && TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL)
    /* This was a friend declared with an explicit template
       argument list, e.g.:

       friend void f<>(T);

       to indicate that f was a template instantiation, not a new
       function declaration.  Now, we have to figure out what
       instantiation of what template.  */
    {
      tree template_id, arglist, fns;
      tree new_args;
      tree tmpl;
      tree ns = decl_namespace_context (TYPE_MAIN_DECL (current_class_type));

      /* Friend functions are looked up in the containing namespace scope.
	 We must enter that scope, to avoid finding member functions of the
	 current class with same name.  */
      push_nested_namespace (ns);
      fns = tsubst_expr (DECL_TI_TEMPLATE (decl), args,
			 tf_warning_or_error, NULL_TREE,
			 /*integral_constant_expression_p=*/false);
      pop_nested_namespace (ns);
      arglist = tsubst (DECL_TI_ARGS (decl), args,
			tf_warning_or_error, NULL_TREE);
      template_id = lookup_template_function (fns, arglist);

      new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);
      tmpl = determine_specialization (template_id, new_friend,
				       &new_args,
				       /*need_member_template=*/0,
				       TREE_VEC_LENGTH (args),
				       tsk_none);
      return instantiate_template (tmpl, new_args, tf_error);
    }

  new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);

  /* The NEW_FRIEND will look like an instantiation, to the
     compiler, but is not an instantiation from the point of view of
     the language.  For example, we might have had:

     template <class T> struct S {
       template <class U> friend void f(T, U);
     };

     Then, in S<int>, template <class U> void f(int, U) is not an
     instantiation of anything.  */

  if (new_friend == error_mark_node)
    return error_mark_node;

  DECL_USE_TEMPLATE (new_friend) = 0;
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      DECL_USE_TEMPLATE (DECL_TEMPLATE_RESULT (new_friend)) = 0;
      DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (new_friend))
	= DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (decl));
    }

  /* The mangled name for the NEW_FRIEND is incorrect.  The function
     is not a template instantiation and should not be mangled like
     one.  Therefore, we forget the mangling here; we'll recompute it
     later if we need it.  */
  if (TREE_CODE (new_friend) != TEMPLATE_DECL)
    {
      SET_DECL_RTL (new_friend, NULL);
      SET_DECL_ASSEMBLER_NAME (new_friend, NULL_TREE);
    }

  if (DECL_NAMESPACE_SCOPE_P (new_friend))
    {
      tree old_decl;
      tree new_friend_template_info;
      tree new_friend_result_template_info;
      tree ns;
      int new_friend_is_defn;

      /* We must save some information from NEW_FRIEND before calling
	 duplicate decls since that function will free NEW_FRIEND if
	 possible.  */
      new_friend_template_info = DECL_TEMPLATE_INFO (new_friend);
      new_friend_is_defn =
	    (DECL_INITIAL (DECL_TEMPLATE_RESULT
			   (template_for_substitution (new_friend)))
	     != NULL_TREE);
      if (TREE_CODE (new_friend) == TEMPLATE_DECL)
	{
	  /* This declaration is a `primary' template.  */
	  DECL_PRIMARY_TEMPLATE (new_friend) = new_friend;

	  new_friend_result_template_info
	    = DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (new_friend));
	}
      else
	new_friend_result_template_info = NULL_TREE;

      /* Inside pushdecl_namespace_level, we will push into the
	 current namespace.  However, the friend function should go
	 into the namespace of the template.  */
      ns = decl_namespace_context (new_friend);
      push_nested_namespace (ns);
      old_decl = pushdecl_namespace_level (new_friend, /*is_friend=*/true);
      pop_nested_namespace (ns);

      if (old_decl == error_mark_node)
	return error_mark_node;

      if (old_decl != new_friend)
	{
	  /* This new friend declaration matched an existing
	     declaration.  For example, given:

	       template <class T> void f(T);
	       template <class U> class C {
		 template <class T> friend void f(T) {}
	       };

	     the friend declaration actually provides the definition
	     of `f', once C has been instantiated for some type.  So,
	     old_decl will be the out-of-class template declaration,
	     while new_friend is the in-class definition.

	     But, if `f' was called before this point, the
	     instantiation of `f' will have DECL_TI_ARGS corresponding
	     to `T' but not to `U', references to which might appear
	     in the definition of `f'.  Previously, the most general
	     template for an instantiation of `f' was the out-of-class
	     version; now it is the in-class version.  Therefore, we
	     run through all specialization of `f', adding to their
	     DECL_TI_ARGS appropriately.  In particular, they need a
	     new set of outer arguments, corresponding to the
	     arguments for this class instantiation.

	     The same situation can arise with something like this:

	       friend void f(int);
	       template <class T> class C {
		 friend void f(T) {}
	       };

	     when `C<int>' is instantiated.  Now, `f(int)' is defined
	     in the class.  */

	  if (!new_friend_is_defn)
	    /* On the other hand, if the in-class declaration does
	       *not* provide a definition, then we don't want to alter
	       existing definitions.  We can just leave everything
	       alone.  */
	    ;
	  else
	    {
	      tree new_template = TI_TEMPLATE (new_friend_template_info);
	      tree new_args = TI_ARGS (new_friend_template_info);

	      /* Overwrite whatever template info was there before, if
		 any, with the new template information pertaining to
		 the declaration.  */
	      DECL_TEMPLATE_INFO (old_decl) = new_friend_template_info;

	      if (TREE_CODE (old_decl) != TEMPLATE_DECL)
		{
		  /* We should have called reregister_specialization in
		     duplicate_decls.  */
		  gcc_assert (retrieve_specialization (new_template,
						       new_args, 0)
			      == old_decl);

		  /* Instantiate it if the global has already been used.  */
		  if (DECL_ODR_USED (old_decl))
		    instantiate_decl (old_decl, /*defer_ok=*/true,
				      /*expl_inst_class_mem_p=*/false);
		}
	      else
		{
		  tree t;

		  /* Indicate that the old function template is a partial
		     instantiation.  */
		  DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (old_decl))
		    = new_friend_result_template_info;

		  gcc_assert (new_template
			      == most_general_template (new_template));
		  gcc_assert (new_template != old_decl);

		  /* Reassign any specializations already in the hash table
		     to the new more general template, and add the
		     additional template args.  */
		  for (t = DECL_TEMPLATE_INSTANTIATIONS (old_decl);
		       t != NULL_TREE;
		       t = TREE_CHAIN (t))
		    {
		      tree spec = TREE_VALUE (t);
		      spec_entry elt;

		      elt.tmpl = old_decl;
		      elt.args = DECL_TI_ARGS (spec);
		      elt.spec = NULL_TREE;

		      decl_specializations->remove_elt (&elt);

		      DECL_TI_ARGS (spec)
			= add_outermost_template_args (new_args,
						       DECL_TI_ARGS (spec));

		      register_specialization
			(spec, new_template, DECL_TI_ARGS (spec), true, 0);

		    }
		  DECL_TEMPLATE_INSTANTIATIONS (old_decl) = NULL_TREE;
		}
	    }

	  /* The information from NEW_FRIEND has been merged into OLD_DECL
	     by duplicate_decls.  */
	  new_friend = old_decl;
	}
    }
  else
    {
      tree context = DECL_CONTEXT (new_friend);
      bool dependent_p;

      /* In the code
	   template <class T> class C {
	     template <class U> friend void C1<U>::f (); // case 1
	     friend void C2<T>::f ();			 // case 2
	   };
	 we only need to make sure CONTEXT is a complete type for
	 case 2.  To distinguish between the two cases, we note that
	 CONTEXT of case 1 remains dependent type after tsubst while
	 this isn't true for case 2.  */
      ++processing_template_decl;
      dependent_p = dependent_type_p (context);
      --processing_template_decl;

      if (!dependent_p
	  && !complete_type_or_else (context, NULL_TREE))
	return error_mark_node;

      if (COMPLETE_TYPE_P (context))
	{
	  tree fn = new_friend;
	  /* do_friend adds the TEMPLATE_DECL for any member friend
	     template even if it isn't a member template, i.e.
	       template <class T> friend A<T>::f();
	     Look through it in that case.  */
	  if (TREE_CODE (fn) == TEMPLATE_DECL
	      && !PRIMARY_TEMPLATE_P (fn))
	    fn = DECL_TEMPLATE_RESULT (fn);
	  /* Check to see that the declaration is really present, and,
	     possibly obtain an improved declaration.  */
	  fn = check_classfn (context, fn, NULL_TREE);

	  if (fn)
	    new_friend = fn;
	}
    }

  return new_friend;
}

/* FRIEND_TMPL is a friend TEMPLATE_DECL.  ARGS is the vector of
   template arguments, as for tsubst.

   Returns an appropriate tsubst'd friend type or error_mark_node on
   failure.  */

static tree
tsubst_friend_class (tree friend_tmpl, tree args)
{
  tree tmpl;

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (friend_tmpl))
    {
      /* The friend is itself a template template parameter; just
	 substitute into its type.  */
      tmpl = tsubst (TREE_TYPE (friend_tmpl), args, tf_none, NULL_TREE);
      return TREE_TYPE (tmpl);
    }

  tree context = CP_DECL_CONTEXT (friend_tmpl);
  if (TREE_CODE (context) == NAMESPACE_DECL)
    push_nested_namespace (context);
  else
    push_nested_class (context);

  tmpl = lookup_name_real (DECL_NAME (friend_tmpl), /*prefer_type=*/false,
			   /*non_class=*/false, /*block_p=*/false,
			   /*namespaces_only=*/false, LOOKUP_HIDDEN);

  if (tmpl && DECL_CLASS_TEMPLATE_P (tmpl))
    {
      /* The friend template has already been declared.  Just
	 check to see that the declarations match, and install any new
	 default parameters.  We must tsubst the default parameters,
	 of course.  We only need the innermost template parameters
	 because that is all that redeclare_class_template will look
	 at.  */
      if (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (friend_tmpl))
	  > TMPL_ARGS_DEPTH (args))
	{
	  tree parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_tmpl),
					      args, tf_warning_or_error);
	  location_t saved_input_location = input_location;
	  input_location = DECL_SOURCE_LOCATION (friend_tmpl);
	  tree cons = get_constraints (tmpl);
	  redeclare_class_template (TREE_TYPE (tmpl), parms, cons);
	  input_location = saved_input_location;
	}
    }
  else
    {
      /* The friend template has not already been declared.  In this
	 case, the instantiation of the template class will cause the
	 injection of this template into the namespace scope.  */
      tmpl = tsubst (friend_tmpl, args, tf_warning_or_error, NULL_TREE);

      if (tmpl != error_mark_node)
	{
	  /* The new TMPL is not an instantiation of anything, so we
	     forget its origins.  We don't reset CLASSTYPE_TI_TEMPLATE
	     for the new type because that is supposed to be the
	     corresponding template decl, i.e., TMPL.  */
	  DECL_USE_TEMPLATE (tmpl) = 0;
	  DECL_TEMPLATE_INFO (tmpl) = NULL_TREE;
	  CLASSTYPE_USE_TEMPLATE (TREE_TYPE (tmpl)) = 0;
	  CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl))
	    = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl)));

	  /* It is hidden.  */
	  retrofit_lang_decl (DECL_TEMPLATE_RESULT (tmpl));
	  DECL_ANTICIPATED (tmpl)
	    = DECL_ANTICIPATED (DECL_TEMPLATE_RESULT (tmpl)) = true;

	  /* Inject this template into the enclosing namespace scope.  */
	  tmpl = pushdecl_namespace_level (tmpl, true);
	}
    }

  if (TREE_CODE (context) == NAMESPACE_DECL)
    pop_nested_namespace (context);
  else
    pop_nested_class ();

  return TREE_TYPE (tmpl);
}

/* Returns zero if TYPE cannot be completed later due to circularity.
   Otherwise returns one.
*/

static int
can_complete_type_without_circularity (tree type)
{
  if (type == NULL_TREE || type == error_mark_node)
    return 0;
  else if (COMPLETE_TYPE_P (type))
    return 1;
  else if (TREE_CODE (type) == ARRAY_TYPE)
    /* An array is completable iff its element type is.  */
    return can_complete_type_without_circularity (TREE_TYPE (type));
  else if (CLASS_TYPE_P (type)
	   && TYPE_BEING_DEFINED (TYPE_MAIN_VARIANT (type)))
    return 0;
  else
    return 1;
}

static tree tsubst_omp_clauses (tree, enum c_omp_region_type, tree,
				tsubst_flags_t, tree);

/* Instantiate a single dependent attribute T (a TREE_LIST), and return either
   T or a new TREE_LIST, possibly a chain in the case of a pack expansion.  */

static tree
tsubst_attribute (tree t, tree *decl_p, tree args,
		  tsubst_flags_t complain, tree in_decl)
{
  gcc_assert (ATTR_IS_DEPENDENT (t));

  tree val = TREE_VALUE (t);
  if (val == NULL_TREE)
    /* Nothing to do.  */;
  else if ((flag_openmp || flag_openmp_simd)
	   && is_attribute_p ("omp declare simd",
			      get_attribute_name (t)))
    {
      /* "omp declare simd": substitute into the clause chain and
	 re-finish the clauses against the instantiated decl.  */
      tree clauses = TREE_VALUE (val);
      clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args,
				    complain, in_decl);
      c_omp_declare_simd_clauses_to_decls (*decl_p, clauses);
      clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
      tree parms = DECL_ARGUMENTS (*decl_p);
      clauses
	= c_omp_declare_simd_clauses_to_numbers (parms, clauses);
      if (clauses)
	val = build_tree_list (NULL_TREE, clauses);
      else
	val = NULL_TREE;
    }
  /* If the first attribute argument is an identifier, don't
     pass it through tsubst.  Attributes like mode, format,
     cleanup and several target specific attributes expect it
     unmodified.  */
  else if (attribute_takes_identifier_p (get_attribute_name (t)))
    {
      tree chain
	= tsubst_expr (TREE_CHAIN (val), args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);
      if (chain != TREE_CHAIN (val))
	val = tree_cons (NULL_TREE, TREE_VALUE (val), chain);
    }
  else if (PACK_EXPANSION_P (val))
    {
      /* An attribute pack expansion.  Expand it into a chain of
	 TREE_LISTs, one per pack element.  */
      tree purp = TREE_PURPOSE (t);
      tree pack = tsubst_pack_expansion (val, args, complain, in_decl);
      if (pack == error_mark_node)
	return error_mark_node;
      int len = TREE_VEC_LENGTH (pack);
      tree list = NULL_TREE;
      tree *q = &list;
      for (int i = 0; i < len; ++i)
	{
	  tree elt = TREE_VEC_ELT (pack, i);
	  *q = build_tree_list (purp, elt);
	  q = &TREE_CHAIN (*q);
	}
      return list;
    }
  else
    val = tsubst_expr (val, args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);

  /* Only build a fresh node when substitution actually changed VAL.  */
  if (val != TREE_VALUE (t))
    return build_tree_list (TREE_PURPOSE (t), val);
  return t;
}

/* Instantiate any dependent attributes in ATTRIBUTES, returning either it
   unchanged or a new TREE_LIST chain.  */

static tree
tsubst_attributes (tree attributes, tree args,
		   tsubst_flags_t complain, tree in_decl)
{
  tree last_dep = NULL_TREE;

  /* Copy the list only if at least one attribute is dependent.  */
  for (tree t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  if (last_dep)
    for (tree *p = &attributes; *p; )
      {
	tree t = *p;
	if (ATTR_IS_DEPENDENT (t))
	  {
	    tree subst = tsubst_attribute (t, NULL, args, complain, in_decl);
	    if (subst != t)
	      {
		/* Splice in the (possibly multi-element) substitution
		   result, then re-attach the rest of the chain.  */
		*p = subst;
		while (*p)
		  p = &TREE_CHAIN (*p);
		*p = TREE_CHAIN (t);
		continue;
	      }
	  }
	p = &TREE_CHAIN (*p);
      }

  return attributes;
}

/* Apply any attributes which had to be deferred until instantiation
   time.  DECL_P, ATTRIBUTES and ATTR_FLAGS are as cplus_decl_attributes;
   ARGS, COMPLAIN, IN_DECL are as tsubst.  */

static void
apply_late_template_attributes (tree *decl_p, tree attributes, int attr_flags,
				tree args, tsubst_flags_t complain,
				tree in_decl)
{
  tree last_dep = NULL_TREE;
  tree t;
  tree *p;

  if (attributes == NULL_TREE)
    return;

  if (DECL_P (*decl_p))
    {
      if (TREE_TYPE (*decl_p) == error_mark_node)
	return;
      p = &DECL_ATTRIBUTES (*decl_p);
      /* DECL_ATTRIBUTES comes from copy_node in tsubst_decl, and is identical
	 to our attributes parameter.  */
      gcc_assert (*p == attributes);
    }
  else
    {
      p = &TYPE_ATTRIBUTES (*decl_p);
      /* TYPE_ATTRIBUTES was set up (with abi_tag and may_alias) in
	 lookup_template_class_1, and should be preserved.  */
      gcc_assert (*p != attributes);
      while (*p)
	p = &TREE_CHAIN (*p);
    }

  /* Copy the list only if at least one attribute is dependent.  */
  for (t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  *p = attributes;
  if (last_dep)
    {
      tree late_attrs = NULL_TREE;
      tree *q = &late_attrs;

      /* Move each dependent attribute off the decl/type list, substitute
	 it, and collect the results to apply in one batch below.  */
      for (; *p; )
	{
	  t = *p;
	  if (ATTR_IS_DEPENDENT (t))
	    {
	      *p = TREE_CHAIN (t);
	      TREE_CHAIN (t) = NULL_TREE;
	      *q = tsubst_attribute (t, decl_p, args, complain, in_decl);
	      while (*q)
		q = &TREE_CHAIN (*q);
	    }
	  else
	    p = &TREE_CHAIN (t);
	}

      cplus_decl_attributes (decl_p, late_attrs, attr_flags);
    }
}

/* Perform (or defer) access check for typedefs that were referenced
   from within the template TMPL code.
   This is a subroutine of instantiate_decl and
   instantiate_class_template.
   TMPL is the template to consider and TARGS is the list of arguments of
   that template.  */

static void
perform_typedefs_access_check (tree tmpl, tree targs)
{
  location_t saved_location;
  unsigned i;
  qualified_typedef_usage_t *iter;

  if (!tmpl
      || (!CLASS_TYPE_P (tmpl)
	  && TREE_CODE (tmpl) != FUNCTION_DECL))
    return;

  saved_location = input_location;
  FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (tmpl), i, iter)
    {
      tree type_decl = iter->typedef_decl;
      tree type_scope = iter->context;

      if (!type_decl || !type_scope || !CLASS_TYPE_P (type_scope))
	continue;

      /* Substitute template arguments into the recorded typedef and its
	 scope if they are still dependent.  */
      if (uses_template_parms (type_decl))
	type_decl = tsubst (type_decl, targs, tf_error, NULL_TREE);
      if (uses_template_parms (type_scope))
	type_scope = tsubst (type_scope, targs, tf_error, NULL_TREE);

      /* Make access check error messages point to the location
	 of the use of the typedef.
*/ input_location = iter->locus; perform_or_defer_access_check (TYPE_BINFO (type_scope), type_decl, type_decl, tf_warning_or_error); } input_location = saved_location; } static tree instantiate_class_template_1 (tree type) { tree templ, args, pattern, t, member; tree typedecl; tree pbinfo; tree base_list; unsigned int saved_maximum_field_alignment; tree fn_context; if (type == error_mark_node) return error_mark_node; if (COMPLETE_OR_OPEN_TYPE_P (type) || uses_template_parms (type)) return type; /* Figure out which template is being instantiated. */ templ = most_general_template (CLASSTYPE_TI_TEMPLATE (type)); gcc_assert (TREE_CODE (templ) == TEMPLATE_DECL); /* Mark the type as in the process of being defined. */ TYPE_BEING_DEFINED (type) = 1; /* Determine what specialization of the original template to instantiate. */ t = most_specialized_partial_spec (type, tf_warning_or_error); if (t == error_mark_node) return error_mark_node; else if (t) { /* This TYPE is actually an instantiation of a partial specialization. We replace the innermost set of ARGS with the arguments appropriate for substitution. For example, given: template <class T> struct S {}; template <class T> struct S<T*> {}; and supposing that we are instantiating S<int*>, ARGS will presently be {int*} -- but we need {int}. */ pattern = TREE_TYPE (t); args = TREE_PURPOSE (t); } else { pattern = TREE_TYPE (templ); args = CLASSTYPE_TI_ARGS (type); } /* If the template we're instantiating is incomplete, then clearly there's nothing we can do. */ if (!COMPLETE_TYPE_P (pattern)) { /* We can try again later. */ TYPE_BEING_DEFINED (type) = 0; return type; } /* If we've recursively instantiated too many templates, stop. */ if (! push_tinst_level (type)) return type; /* We may be in the middle of deferred access check. Disable it now. 
*/ push_deferring_access_checks (dk_no_deferred); int saved_unevaluated_operand = cp_unevaluated_operand; int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings; fn_context = decl_function_context (TYPE_MAIN_DECL (type)); /* Also avoid push_to_top_level for a lambda in an NSDMI. */ if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type)) fn_context = error_mark_node; if (!fn_context) push_to_top_level (); else { cp_unevaluated_operand = 0; c_inhibit_evaluation_warnings = 0; } /* Use #pragma pack from the template context. */ saved_maximum_field_alignment = maximum_field_alignment; maximum_field_alignment = TYPE_PRECISION (pattern); SET_CLASSTYPE_INTERFACE_UNKNOWN (type); /* Set the input location to the most specialized template definition. This is needed if tsubsting causes an error. */ typedecl = TYPE_MAIN_DECL (pattern); input_location = DECL_SOURCE_LOCATION (TYPE_NAME (type)) = DECL_SOURCE_LOCATION (typedecl); TYPE_PACKED (type) = TYPE_PACKED (pattern); SET_TYPE_ALIGN (type, TYPE_ALIGN (pattern)); TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (pattern); CLASSTYPE_NON_AGGREGATE (type) = CLASSTYPE_NON_AGGREGATE (pattern); if (ANON_AGGR_TYPE_P (pattern)) SET_ANON_AGGR_TYPE_P (type); if (CLASSTYPE_VISIBILITY_SPECIFIED (pattern)) { CLASSTYPE_VISIBILITY_SPECIFIED (type) = 1; CLASSTYPE_VISIBILITY (type) = CLASSTYPE_VISIBILITY (pattern); /* Adjust visibility for template arguments. */ determine_visibility (TYPE_MAIN_DECL (type)); } if (CLASS_TYPE_P (type)) CLASSTYPE_FINAL (type) = CLASSTYPE_FINAL (pattern); pbinfo = TYPE_BINFO (pattern); /* We should never instantiate a nested class before its enclosing class; we need to look up the nested class by name before we can instantiate it, and that lookup should instantiate the enclosing class. 
*/ gcc_assert (!DECL_CLASS_SCOPE_P (TYPE_MAIN_DECL (pattern)) || COMPLETE_OR_OPEN_TYPE_P (TYPE_CONTEXT (type))); base_list = NULL_TREE; if (BINFO_N_BASE_BINFOS (pbinfo)) { tree pbase_binfo; tree pushed_scope; int i; /* We must enter the scope containing the type, as that is where the accessibility of types named in dependent bases are looked up from. */ pushed_scope = push_scope (CP_TYPE_CONTEXT (type)); /* Substitute into each of the bases to determine the actual basetypes. */ for (i = 0; BINFO_BASE_ITERATE (pbinfo, i, pbase_binfo); i++) { tree base; tree access = BINFO_BASE_ACCESS (pbinfo, i); tree expanded_bases = NULL_TREE; int idx, len = 1; if (PACK_EXPANSION_P (BINFO_TYPE (pbase_binfo))) { expanded_bases = tsubst_pack_expansion (BINFO_TYPE (pbase_binfo), args, tf_error, NULL_TREE); if (expanded_bases == error_mark_node) continue; len = TREE_VEC_LENGTH (expanded_bases); } for (idx = 0; idx < len; idx++) { if (expanded_bases) /* Extract the already-expanded base class. */ base = TREE_VEC_ELT (expanded_bases, idx); else /* Substitute to figure out the base class. */ base = tsubst (BINFO_TYPE (pbase_binfo), args, tf_error, NULL_TREE); if (base == error_mark_node) continue; base_list = tree_cons (access, base, base_list); if (BINFO_VIRTUAL_P (pbase_binfo)) TREE_TYPE (base_list) = integer_type_node; } } /* The list is now in reverse order; correct that. */ base_list = nreverse (base_list); if (pushed_scope) pop_scope (pushed_scope); } /* Now call xref_basetypes to set up all the base-class information. */ xref_basetypes (type, base_list); apply_late_template_attributes (&type, TYPE_ATTRIBUTES (pattern), (int) ATTR_FLAG_TYPE_IN_PLACE, args, tf_error, NULL_TREE); fixup_attribute_variants (type); /* Now that our base classes are set up, enter the scope of the class, so that name lookups into base classes, etc. will work correctly. 
This is precisely analogous to what we do in begin_class_definition when defining an ordinary non-template class, except we also need to push the enclosing classes. */ push_nested_class (type); /* Now members are processed in the order of declaration. */ for (member = CLASSTYPE_DECL_LIST (pattern); member; member = TREE_CHAIN (member)) { tree t = TREE_VALUE (member); if (TREE_PURPOSE (member)) { if (TYPE_P (t)) { if (LAMBDA_TYPE_P (t)) /* A closure type for a lambda in an NSDMI or default argument. Ignore it; it will be regenerated when needed. */ continue; /* Build new CLASSTYPE_NESTED_UTDS. */ tree newtag; bool class_template_p; class_template_p = (TREE_CODE (t) != ENUMERAL_TYPE && TYPE_LANG_SPECIFIC (t) && CLASSTYPE_IS_TEMPLATE (t)); /* If the member is a class template, then -- even after substitution -- there may be dependent types in the template argument list for the class. We increment PROCESSING_TEMPLATE_DECL so that dependent_type_p, as that function will assume that no types are dependent when outside of a template. */ if (class_template_p) ++processing_template_decl; newtag = tsubst (t, args, tf_error, NULL_TREE); if (class_template_p) --processing_template_decl; if (newtag == error_mark_node) continue; if (TREE_CODE (newtag) != ENUMERAL_TYPE) { tree name = TYPE_IDENTIFIER (t); if (class_template_p) /* Unfortunately, lookup_template_class sets CLASSTYPE_IMPLICIT_INSTANTIATION for a partial instantiation (i.e., for the type of a member template class nested within a template class.) This behavior is required for maybe_process_partial_specialization to work correctly, but is not accurate in this case; the TAG is not an instantiation of anything. (The corresponding TEMPLATE_DECL is an instantiation, but the TYPE is not.) */ CLASSTYPE_USE_TEMPLATE (newtag) = 0; /* Now, we call pushtag to put this NEWTAG into the scope of TYPE. We first set up the IDENTIFIER_TYPE_VALUE to avoid pushtag calling push_template_decl. 
We don't have to do this for enums because it will already have been done in tsubst_enum. */ if (name) SET_IDENTIFIER_TYPE_VALUE (name, newtag); pushtag (name, newtag, /*tag_scope=*/ts_current); } } else if (DECL_DECLARES_FUNCTION_P (t)) { tree r; if (TREE_CODE (t) == TEMPLATE_DECL) ++processing_template_decl; r = tsubst (t, args, tf_error, NULL_TREE); if (TREE_CODE (t) == TEMPLATE_DECL) --processing_template_decl; set_current_access_from_decl (r); finish_member_declaration (r); /* Instantiate members marked with attribute used. */ if (r != error_mark_node && DECL_PRESERVE_P (r)) mark_used (r); if (TREE_CODE (r) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (r)) cp_check_omp_declare_reduction (r); } else if ((DECL_CLASS_TEMPLATE_P (t) || DECL_IMPLICIT_TYPEDEF_P (t)) && LAMBDA_TYPE_P (TREE_TYPE (t))) /* A closure type for a lambda in an NSDMI or default argument. Ignore it; it will be regenerated when needed. */; else { /* Build new TYPE_FIELDS. */ if (TREE_CODE (t) == STATIC_ASSERT) { tree condition; ++c_inhibit_evaluation_warnings; condition = tsubst_expr (STATIC_ASSERT_CONDITION (t), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/true); --c_inhibit_evaluation_warnings; finish_static_assert (condition, STATIC_ASSERT_MESSAGE (t), STATIC_ASSERT_SOURCE_LOCATION (t), /*member_p=*/true); } else if (TREE_CODE (t) != CONST_DECL) { tree r; tree vec = NULL_TREE; int len = 1; /* The file and line for this declaration, to assist in error message reporting. Since we called push_tinst_level above, we don't need to restore these. */ input_location = DECL_SOURCE_LOCATION (t); if (TREE_CODE (t) == TEMPLATE_DECL) ++processing_template_decl; r = tsubst (t, args, tf_warning_or_error, NULL_TREE); if (TREE_CODE (t) == TEMPLATE_DECL) --processing_template_decl; if (TREE_CODE (r) == TREE_VEC) { /* A capture pack became multiple fields. 
*/ vec = r; len = TREE_VEC_LENGTH (vec); } for (int i = 0; i < len; ++i) { if (vec) r = TREE_VEC_ELT (vec, i); if (VAR_P (r)) { /* In [temp.inst]: [t]he initialization (and any associated side-effects) of a static data member does not occur unless the static data member is itself used in a way that requires the definition of the static data member to exist. Therefore, we do not substitute into the initialized for the static data member here. */ finish_static_data_member_decl (r, /*init=*/NULL_TREE, /*init_const_expr_p=*/false, /*asmspec_tree=*/NULL_TREE, /*flags=*/0); /* Instantiate members marked with attribute used. */ if (r != error_mark_node && DECL_PRESERVE_P (r)) mark_used (r); } else if (TREE_CODE (r) == FIELD_DECL) { /* Determine whether R has a valid type and can be completed later. If R is invalid, then its type is replaced by error_mark_node. */ tree rtype = TREE_TYPE (r); if (can_complete_type_without_circularity (rtype)) complete_type (rtype); if (!complete_or_array_type_p (rtype)) { /* If R's type couldn't be completed and it isn't a flexible array member (whose type is incomplete by definition) give an error. */ cxx_incomplete_type_error (r, rtype); TREE_TYPE (r) = error_mark_node; } else if (TREE_CODE (rtype) == ARRAY_TYPE && TYPE_DOMAIN (rtype) == NULL_TREE && (TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE)) { error ("flexible array member %qD in union", r); TREE_TYPE (r) = error_mark_node; } } /* If it is a TYPE_DECL for a class-scoped ENUMERAL_TYPE, such a thing will already have been added to the field list by tsubst_enum in finish_member_declaration in the CLASSTYPE_NESTED_UTDS case above. */ if (!(TREE_CODE (r) == TYPE_DECL && TREE_CODE (TREE_TYPE (r)) == ENUMERAL_TYPE && DECL_ARTIFICIAL (r))) { set_current_access_from_decl (r); finish_member_declaration (r); } } } } } else { if (TYPE_P (t) || DECL_CLASS_TEMPLATE_P (t) || DECL_TEMPLATE_TEMPLATE_PARM_P (t)) { /* Build new CLASSTYPE_FRIEND_CLASSES. 
*/ tree friend_type = t; bool adjust_processing_template_decl = false; if (TREE_CODE (friend_type) == TEMPLATE_DECL) { /* template <class T> friend class C; */ friend_type = tsubst_friend_class (friend_type, args); adjust_processing_template_decl = true; } else if (TREE_CODE (friend_type) == UNBOUND_CLASS_TEMPLATE) { /* template <class T> friend class C::D; */ friend_type = tsubst (friend_type, args, tf_warning_or_error, NULL_TREE); if (TREE_CODE (friend_type) == TEMPLATE_DECL) friend_type = TREE_TYPE (friend_type); adjust_processing_template_decl = true; } else if (TREE_CODE (friend_type) == TYPENAME_TYPE || TREE_CODE (friend_type) == TEMPLATE_TYPE_PARM) { /* This could be either friend class T::C; when dependent_type_p is false or template <class U> friend class T::C; otherwise. */ /* Bump processing_template_decl in case this is something like template <class T> friend struct A<T>::B. */ ++processing_template_decl; friend_type = tsubst (friend_type, args, tf_warning_or_error, NULL_TREE); if (dependent_type_p (friend_type)) adjust_processing_template_decl = true; --processing_template_decl; } else if (TREE_CODE (friend_type) != BOUND_TEMPLATE_TEMPLATE_PARM && !CLASSTYPE_USE_TEMPLATE (friend_type) && TYPE_HIDDEN_P (friend_type)) { /* friend class C; where C hasn't been declared yet. Let's lookup name from namespace scope directly, bypassing any name that come from dependent base class. */ tree ns = decl_namespace_context (TYPE_MAIN_DECL (friend_type)); /* The call to xref_tag_from_type does injection for friend classes. */ push_nested_namespace (ns); friend_type = xref_tag_from_type (friend_type, NULL_TREE, /*tag_scope=*/ts_current); pop_nested_namespace (ns); } else if (uses_template_parms (friend_type)) /* friend class C<T>; */ friend_type = tsubst (friend_type, args, tf_warning_or_error, NULL_TREE); /* Otherwise it's friend class C; where C is already declared or friend class C<int>; We don't have to do anything in these cases. 
*/ if (adjust_processing_template_decl) /* Trick make_friend_class into realizing that the friend we're adding is a template, not an ordinary class. It's important that we use make_friend_class since it will perform some error-checking and output cross-reference information. */ ++processing_template_decl; if (friend_type != error_mark_node) make_friend_class (type, friend_type, /*complain=*/false); if (adjust_processing_template_decl) --processing_template_decl; } else { /* Build new DECL_FRIENDLIST. */ tree r; /* The file and line for this declaration, to assist in error message reporting. Since we called push_tinst_level above, we don't need to restore these. */ input_location = DECL_SOURCE_LOCATION (t); if (TREE_CODE (t) == TEMPLATE_DECL) { ++processing_template_decl; push_deferring_access_checks (dk_no_check); } r = tsubst_friend_function (t, args); add_friend (type, r, /*complain=*/false); if (TREE_CODE (t) == TEMPLATE_DECL) { pop_deferring_access_checks (); --processing_template_decl; } } } } if (fn_context) { /* Restore these before substituting into the lambda capture initializers. */ cp_unevaluated_operand = saved_unevaluated_operand; c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings; } /* Set the file and line number information to whatever is given for the class itself. This puts error messages involving generated implicit functions at a predictable point, and the same point that would be used for non-template classes. */ input_location = DECL_SOURCE_LOCATION (typedecl); unreverse_member_declarations (type); finish_struct_1 (type); TYPE_BEING_DEFINED (type) = 0; /* We don't instantiate default arguments for member functions. 14.7.1: The implicit instantiation of a class template specialization causes the implicit instantiation of the declarations, but not of the definitions or default arguments, of the class member functions, member classes, static data members and member templates.... 
*/ /* Some typedefs referenced from within the template code need to be access checked at template instantiation time, i.e now. These types were added to the template at parsing time. Let's get those and perform the access checks then. */ perform_typedefs_access_check (pattern, args); perform_deferred_access_checks (tf_warning_or_error); pop_nested_class (); maximum_field_alignment = saved_maximum_field_alignment; if (!fn_context) pop_from_top_level (); pop_deferring_access_checks (); pop_tinst_level (); /* The vtable for a template class can be emitted in any translation unit in which the class is instantiated. When there is no key method, however, finish_struct_1 will already have added TYPE to the keyed_classes. */ if (TYPE_CONTAINS_VPTR_P (type) && CLASSTYPE_KEY_METHOD (type)) vec_safe_push (keyed_classes, type); return type; } /* Wrapper for instantiate_class_template_1. */ tree instantiate_class_template (tree type) { tree ret; timevar_push (TV_TEMPLATE_INST); ret = instantiate_class_template_1 (type); timevar_pop (TV_TEMPLATE_INST); return ret; } static tree tsubst_template_arg (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree r; if (!t) r = t; else if (TYPE_P (t)) r = tsubst (t, args, complain, in_decl); else { if (!(complain & tf_warning)) ++c_inhibit_evaluation_warnings; r = tsubst_expr (t, args, complain, in_decl, /*integral_constant_expression_p=*/true); if (!(complain & tf_warning)) --c_inhibit_evaluation_warnings; } return r; } /* Given a function parameter pack TMPL_PARM and some function parameters instantiated from it at *SPEC_P, return a NONTYPE_ARGUMENT_PACK of them and set *SPEC_P to point at the next point in the list. */ tree extract_fnparm_pack (tree tmpl_parm, tree *spec_p) { /* Collect all of the extra "packed" parameters into an argument pack. 
*/
  tree parmvec;
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  tree spec_parm = *spec_p;
  int i, len;

  /* Count how many of the parameters starting at *SPEC_P were expanded
     from TMPL_PARM; with a null TMPL_PARM, take the whole chain.  */
  for (len = 0; spec_parm; ++len, spec_parm = TREE_CHAIN (spec_parm))
    if (tmpl_parm
	&& !function_parameter_expanded_from_pack_p (spec_parm, tmpl_parm))
      break;

  /* Fill in PARMVEC and PARMTYPEVEC with all of the parameters.  */
  parmvec = make_tree_vec (len);
  spec_parm = *spec_p;
  for (i = 0; i < len; i++, spec_parm = DECL_CHAIN (spec_parm))
    {
      tree elt = spec_parm;
      if (DECL_PACK_P (elt))
	/* A still-unexpanded parameter pack: record it as an expansion.  */
	elt = make_pack_expansion (elt);
      TREE_VEC_ELT (parmvec, i) = elt;
    }

  /* Build the argument packs.  */
  SET_ARGUMENT_PACK_ARGS (argpack, parmvec);
  /* Advance the caller's cursor past the parameters we consumed.  */
  *spec_p = spec_parm;

  return argpack;
}

/* Given a chain SPEC_PARM of PARM_DECLs, pack them into a
   NONTYPE_ARGUMENT_PACK.  */

static tree
make_fnparm_pack (tree spec_parm)
{
  return extract_fnparm_pack (NULL_TREE, &spec_parm);
}

/* Return 1 if the Ith element of the argument pack ARG_PACK is a
   pack expansion with no extra args, 2 if it has extra args, or 0
   if it is not a pack expansion.  */

static int
argument_pack_element_is_expansion_p (tree arg_pack, int i)
{
  tree vec = ARGUMENT_PACK_ARGS (arg_pack);
  if (i >= TREE_VEC_LENGTH (vec))
    return 0;
  tree elt = TREE_VEC_ELT (vec, i);
  if (DECL_P (elt))
    /* A decl pack is itself an expansion.  */
    elt = TREE_TYPE (elt);
  if (!PACK_EXPANSION_P (elt))
    return 0;
  if (PACK_EXPANSION_EXTRA_ARGS (elt))
    return 2;
  return 1;
}

/* Create and return an ARGUMENT_PACK_SELECT tree node, which selects
   element INDEX of ARG_PACK.  */

static tree
make_argument_pack_select (tree arg_pack, unsigned index)
{
  tree aps = make_node (ARGUMENT_PACK_SELECT);

  ARGUMENT_PACK_SELECT_FROM_PACK (aps) = arg_pack;
  ARGUMENT_PACK_SELECT_INDEX (aps) = index;

  return aps;
}

/* This is a subroutine of tsubst_pack_expansion.

   It returns TRUE if we need to use the PACK_EXPANSION_EXTRA_ARGS
   mechanism to store the (non complete list of) arguments of the
   substitution and return a non substituted pack expansion, in order
   to wait for when we have enough arguments to really perform the
   substitution.
*/

static bool
use_pack_expansion_extra_args_p (tree parm_packs,
				 int arg_pack_len,
				 bool has_empty_arg)
{
  /* If one pack has an expansion and another pack has a normal
     argument or if one pack has an empty argument and an another
     one hasn't then tsubst_pack_expansion cannot perform the
     substitution and need to fall back on the
     PACK_EXPANSION_EXTRA mechanism.  */
  if (parm_packs == NULL_TREE)
    return false;
  else if (has_empty_arg)
    return true;

  bool has_expansion_arg = false;
  for (int i = 0 ; i < arg_pack_len; ++i)
    {
      bool has_non_expansion_arg = false;
      for (tree parm_pack = parm_packs;
	   parm_pack;
	   parm_pack = TREE_CHAIN (parm_pack))
	{
	  /* TREE_VALUE of each element is the argument pack bound to
	     one of the pattern's parameter packs.  */
	  tree arg = TREE_VALUE (parm_pack);

	  int exp = argument_pack_element_is_expansion_p (arg, i);
	  if (exp == 2)
	    /* We can't substitute a pack expansion with extra args into
	       our pattern.  */
	    return true;
	  else if (exp)
	    has_expansion_arg = true;
	  else
	    has_non_expansion_arg = true;
	}

      /* An expansion and a plain argument at the same index means the
	 packs cannot be lined up element-for-element.  */
      if (has_expansion_arg && has_non_expansion_arg)
	return true;
    }

  return false;
}

/* [temp.variadic]/6 says that:

       The instantiation of a pack expansion [...]
       produces a list E1,E2, ..., En, where N is the number of elements
       in the pack expansion parameters.

   This subroutine of tsubst_pack_expansion produces one of these Ei.

   PATTERN is the pattern of the pack expansion.  PARM_PACKS is a
   TREE_LIST in which each TREE_PURPOSE is a parameter pack of
   PATTERN, and each TREE_VALUE is its corresponding argument pack.
   INDEX is the index 'i' of the element Ei to produce.  ARGS,
   COMPLAIN, and IN_DECL are the same parameters as for the
   tsubst_pack_expansion function.

   The function returns the resulting Ei upon successful completion,
   or error_mark_node.

   Note that this function possibly modifies the ARGS parameter, so
   it's the responsibility of the caller to restore it.  */

static tree
gen_elem_of_pack_expansion_instantiation (tree pattern,
					  tree parm_packs,
					  unsigned index,
					  tree args /* This parm gets modified.
*/, tsubst_flags_t complain, tree in_decl) { tree t; bool ith_elem_is_expansion = false; /* For each parameter pack, change the substitution of the parameter pack to the ith argument in its argument pack, then expand the pattern. */ for (tree pack = parm_packs; pack; pack = TREE_CHAIN (pack)) { tree parm = TREE_PURPOSE (pack); tree arg_pack = TREE_VALUE (pack); tree aps; /* instance of ARGUMENT_PACK_SELECT. */ ith_elem_is_expansion |= argument_pack_element_is_expansion_p (arg_pack, index); /* Select the Ith argument from the pack. */ if (TREE_CODE (parm) == PARM_DECL || VAR_P (parm) || TREE_CODE (parm) == FIELD_DECL) { if (index == 0) { aps = make_argument_pack_select (arg_pack, index); if (!mark_used (parm, complain) && !(complain & tf_error)) return error_mark_node; register_local_specialization (aps, parm); } else aps = retrieve_local_specialization (parm); } else { int idx, level; template_parm_level_and_index (parm, &level, &idx); if (index == 0) { aps = make_argument_pack_select (arg_pack, index); /* Update the corresponding argument. */ TMPL_ARG (args, level, idx) = aps; } else /* Re-use the ARGUMENT_PACK_SELECT. */ aps = TMPL_ARG (args, level, idx); } ARGUMENT_PACK_SELECT_INDEX (aps) = index; } /* Substitute into the PATTERN with the (possibly altered) arguments. */ if (pattern == in_decl) /* Expanding a fixed parameter pack from coerce_template_parameter_pack. */ t = tsubst_decl (pattern, args, complain); else if (pattern == error_mark_node) t = error_mark_node; else if (constraint_p (pattern)) { if (processing_template_decl) t = tsubst_constraint (pattern, args, complain, in_decl); else t = (constraints_satisfied_p (pattern, args) ? 
boolean_true_node : boolean_false_node); } else if (!TYPE_P (pattern)) t = tsubst_expr (pattern, args, complain, in_decl, /*integral_constant_expression_p=*/false); else t = tsubst (pattern, args, complain, in_decl); /* If the Ith argument pack element is a pack expansion, then the Ith element resulting from the substituting is going to be a pack expansion as well. */ if (ith_elem_is_expansion) t = make_pack_expansion (t, complain); return t; } /* When the unexpanded parameter pack in a fold expression expands to an empty sequence, the value of the expression is as follows; the program is ill-formed if the operator is not listed in this table. && true || false , void() */ tree expand_empty_fold (tree t, tsubst_flags_t complain) { tree_code code = (tree_code)TREE_INT_CST_LOW (TREE_OPERAND (t, 0)); if (!FOLD_EXPR_MODIFY_P (t)) switch (code) { case TRUTH_ANDIF_EXPR: return boolean_true_node; case TRUTH_ORIF_EXPR: return boolean_false_node; case COMPOUND_EXPR: return void_node; default: break; } if (complain & tf_error) error_at (location_of (t), "fold of empty expansion over %O", code); return error_mark_node; } /* Given a fold-expression T and a current LEFT and RIGHT operand, form an expression that combines the two terms using the operator of T. */ static tree fold_expression (tree t, tree left, tree right, tsubst_flags_t complain) { tree op = FOLD_EXPR_OP (t); tree_code code = (tree_code)TREE_INT_CST_LOW (op); // Handle compound assignment operators. if (FOLD_EXPR_MODIFY_P (t)) return build_x_modify_expr (input_location, left, code, right, complain); switch (code) { case COMPOUND_EXPR: return build_x_compound_expr (input_location, left, right, complain); case DOTSTAR_EXPR: return build_m_component_ref (left, right, complain); default: return build_x_binary_op (input_location, code, left, TREE_CODE (left), right, TREE_CODE (right), /*overload=*/NULL, complain); } } /* Substitute ARGS into the pack of a fold expression T. 
*/

static inline tree
tsubst_fold_expr_pack (tree t, tree args, tsubst_flags_t complain,
		       tree in_decl)
{
  return tsubst_pack_expansion (FOLD_EXPR_PACK (t), args, complain, in_decl);
}

/* Substitute ARGS into the initializer (the non-pack operand) of a
   binary fold expression T.  */

static inline tree
tsubst_fold_expr_init (tree t, tree args, tsubst_flags_t complain,
		       tree in_decl)
{
  return tsubst_expr (FOLD_EXPR_INIT (t), args, complain, in_decl, false);
}

/* Expand a PACK of arguments into a grouped left fold.  Given a pack
   containing elements A0, A1, ..., An and an operator @, this builds
   the expression:

      ((A0 @ A1) @ A2) ... @ An

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */

static tree
expand_left_fold (tree t, tree pack, tsubst_flags_t complain)
{
  tree left = TREE_VEC_ELT (pack, 0);
  for (int i = 1; i < TREE_VEC_LENGTH (pack); ++i)
    {
      tree right = TREE_VEC_ELT (pack, i);
      left = fold_expression (t, left, right, complain);
    }
  return left;
}

/* Substitute into a unary left fold expression.  */

static tree
tsubst_unary_left_fold (tree t, tree args, tsubst_flags_t complain,
			tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;
  if (PACK_EXPANSION_P (pack))
    {
      /* Only a partial substitution was possible; rebuild the fold
	 around the partially substituted pack for later.  */
      tree r = copy_node (t);
      FOLD_EXPR_PACK (r) = pack;
      return r;
    }
  if (TREE_VEC_LENGTH (pack) == 0)
    return expand_empty_fold (t, complain);
  else
    return expand_left_fold (t, pack, complain);
}

/* Substitute into a binary left fold expression.  Do this by building a
   single (non-empty) vector of arguments and building the expression
   from those elements.
*/

static tree
tsubst_binary_left_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;

  tree init = tsubst_fold_expr_init (t, args, complain, in_decl);
  if (init == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (pack))
    {
      /* Only a partial substitution was possible; rebuild the fold
	 to be completed once more arguments are available.  */
      tree r = copy_node (t);
      FOLD_EXPR_PACK (r) = pack;
      FOLD_EXPR_INIT (r) = init;
      return r;
    }

  /* For a left fold, the init operand is the leftmost element.  */
  tree vec = make_tree_vec (TREE_VEC_LENGTH (pack) + 1);
  TREE_VEC_ELT (vec, 0) = init;
  for (int i = 0; i < TREE_VEC_LENGTH (pack); ++i)
    TREE_VEC_ELT (vec, i + 1) = TREE_VEC_ELT (pack, i);

  return expand_left_fold (t, vec, complain);
}

/* Expand a PACK of arguments into a grouped right fold.  Given a pack
   containing elements A0, A1, ..., An and an operator @, this builds
   the expression:

      A0 @ ... (An-2 @ (An-1 @ An))

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */

tree
expand_right_fold (tree t, tree pack, tsubst_flags_t complain)
{
  // Build the expression, folding from the rightmost pair outward.
  int n = TREE_VEC_LENGTH (pack);
  tree right = TREE_VEC_ELT (pack, n - 1);
  for (--n; n != 0; --n)
    {
      tree left = TREE_VEC_ELT (pack, n - 1);
      right = fold_expression (t, left, right, complain);
    }
  return right;
}

/* Substitute into a unary right fold expression.  */

static tree
tsubst_unary_right_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;
  if (PACK_EXPANSION_P (pack))
    {
      /* Only a partial substitution was possible; rebuild the fold
	 around the partially substituted pack for later.  */
      tree r = copy_node (t);
      FOLD_EXPR_PACK (r) = pack;
      return r;
    }
  if (TREE_VEC_LENGTH (pack) == 0)
    return expand_empty_fold (t, complain);
  else
    return expand_right_fold (t, pack, complain);
}

/* Substitute into a binary right fold expression.  Do this by building
   a single (non-empty) vector of arguments and building the expression
   from those elements.
*/ static tree tsubst_binary_right_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; tree init = tsubst_fold_expr_init (t, args, complain, in_decl); if (init == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; FOLD_EXPR_INIT (r) = init; return r; } int n = TREE_VEC_LENGTH (pack); tree vec = make_tree_vec (n + 1); for (int i = 0; i < n; ++i) TREE_VEC_ELT (vec, i) = TREE_VEC_ELT (pack, i); TREE_VEC_ELT (vec, n) = init; return expand_right_fold (t, vec, complain); } /* Walk through the pattern of a pack expansion, adding everything in local_specializations to a list. */ struct el_data { hash_set<tree> internal; tree extra; tsubst_flags_t complain; el_data (tsubst_flags_t c) : extra (NULL_TREE), complain (c) {} }; static tree extract_locals_r (tree *tp, int */*walk_subtrees*/, void *data_) { el_data &data = *reinterpret_cast<el_data*>(data_); tree *extra = &data.extra; tsubst_flags_t complain = data.complain; if (TYPE_P (*tp) && typedef_variant_p (*tp)) /* Remember local typedefs (85214). */ tp = &TYPE_NAME (*tp); if (TREE_CODE (*tp) == DECL_EXPR) data.internal.add (DECL_EXPR_DECL (*tp)); else if (tree spec = retrieve_local_specialization (*tp)) { if (data.internal.contains (*tp)) /* Don't mess with variables declared within the pattern. */ return NULL_TREE; if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK) { /* Maybe pull out the PARM_DECL for a partial instantiation. */ tree args = ARGUMENT_PACK_ARGS (spec); if (TREE_VEC_LENGTH (args) == 1) { tree elt = TREE_VEC_ELT (args, 0); if (PACK_EXPANSION_P (elt)) elt = PACK_EXPANSION_PATTERN (elt); if (DECL_PACK_P (elt)) spec = elt; } if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK) { /* Handle lambda capture here, since we aren't doing any substitution now, and so tsubst_copy won't call process_outer_var_ref. 
*/ tree args = ARGUMENT_PACK_ARGS (spec); int len = TREE_VEC_LENGTH (args); for (int i = 0; i < len; ++i) { tree arg = TREE_VEC_ELT (args, i); tree carg = arg; if (outer_automatic_var_p (arg)) carg = process_outer_var_ref (arg, complain); if (carg != arg) { /* Make a new NONTYPE_ARGUMENT_PACK of the capture proxies. */ if (i == 0) { spec = copy_node (spec); args = copy_node (args); SET_ARGUMENT_PACK_ARGS (spec, args); register_local_specialization (spec, *tp); } TREE_VEC_ELT (args, i) = carg; } } } } if (outer_automatic_var_p (spec)) spec = process_outer_var_ref (spec, complain); *extra = tree_cons (*tp, spec, *extra); } return NULL_TREE; } static tree extract_local_specs (tree pattern, tsubst_flags_t complain) { el_data data (complain); cp_walk_tree_without_duplicates (&pattern, extract_locals_r, &data); return data.extra; } /* Extract any uses of local_specializations from PATTERN and add them to ARGS for use in PACK_EXPANSION_EXTRA_ARGS. */ tree build_extra_args (tree pattern, tree args, tsubst_flags_t complain) { tree extra = args; if (local_specializations) if (tree locals = extract_local_specs (pattern, complain)) extra = tree_cons (NULL_TREE, extra, locals); return extra; } /* Apply any local specializations from PACK_EXPANSION_EXTRA_ARGS and add the normal template args to ARGS. */ tree add_extra_args (tree extra, tree args) { if (extra && TREE_CODE (extra) == TREE_LIST) { for (tree elt = TREE_CHAIN (extra); elt; elt = TREE_CHAIN (elt)) { /* The partial instantiation involved local declarations collected in extract_local_specs; map from the general template to our local context. */ tree gen = TREE_PURPOSE (elt); tree inst = TREE_VALUE (elt); if (DECL_P (inst)) if (tree local = retrieve_local_specialization (inst)) inst = local; /* else inst is already a full instantiation of the pack. 
*/ register_local_specialization (inst, gen); } gcc_assert (!TREE_PURPOSE (extra)); extra = TREE_VALUE (extra); } return add_to_template_args (extra, args); } /* Substitute ARGS into T, which is an pack expansion (i.e. TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION). Returns a TREE_VEC with the substituted arguments, a PACK_EXPANSION_* node (if only a partial substitution could be performed) or ERROR_MARK_NODE if there was an error. */ tree tsubst_pack_expansion (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pattern; tree pack, packs = NULL_TREE; bool unsubstituted_packs = false; bool unsubstituted_fn_pack = false; int i, len = -1; tree result; hash_map<tree, tree> *saved_local_specializations = NULL; bool need_local_specializations = false; int levels; gcc_assert (PACK_EXPANSION_P (t)); pattern = PACK_EXPANSION_PATTERN (t); /* Add in any args remembered from an earlier partial instantiation. */ args = add_extra_args (PACK_EXPANSION_EXTRA_ARGS (t), args); levels = TMPL_ARGS_DEPTH (args); /* Determine the argument packs that will instantiate the parameter packs used in the expansion expression. While we're at it, compute the number of arguments to be expanded and make sure it is consistent. */ for (pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack; pack = TREE_CHAIN (pack)) { tree parm_pack = TREE_VALUE (pack); tree arg_pack = NULL_TREE; tree orig_arg = NULL_TREE; int level = 0; if (TREE_CODE (parm_pack) == BASES) { gcc_assert (parm_pack == pattern); if (BASES_DIRECT (parm_pack)) return calculate_direct_bases (tsubst_expr (BASES_TYPE (parm_pack), args, complain, in_decl, false), complain); else return calculate_bases (tsubst_expr (BASES_TYPE (parm_pack), args, complain, in_decl, false), complain); } else if (builtin_pack_call_p (parm_pack)) { /* ??? Support use in other patterns. 
*/ gcc_assert (parm_pack == pattern); return expand_builtin_pack_call (parm_pack, args, complain, in_decl); } else if (TREE_CODE (parm_pack) == PARM_DECL) { /* We know we have correct local_specializations if this expansion is at function scope, or if we're dealing with a local parameter in a requires expression; for the latter, tsubst_requires_expr set it up appropriately. */ if (PACK_EXPANSION_LOCAL_P (t) || CONSTRAINT_VAR_P (parm_pack)) arg_pack = retrieve_local_specialization (parm_pack); else /* We can't rely on local_specializations for a parameter name used later in a function declaration (such as in a late-specified return type). Even if it exists, it might have the wrong value for a recursive call. */ need_local_specializations = true; if (!arg_pack) { /* This parameter pack was used in an unevaluated context. Just make a dummy decl, since it's only used for its type. */ ++cp_unevaluated_operand; arg_pack = tsubst_decl (parm_pack, args, complain); --cp_unevaluated_operand; if (arg_pack && DECL_PACK_P (arg_pack)) /* Partial instantiation of the parm_pack, we can't build up an argument pack yet. */ arg_pack = NULL_TREE; else arg_pack = make_fnparm_pack (arg_pack); } else if (argument_pack_element_is_expansion_p (arg_pack, 0)) /* This argument pack isn't fully instantiated yet. We set this flag rather than clear arg_pack because we do want to do the optimization below, and we don't want to substitute directly into the pattern (as that would expose a NONTYPE_ARGUMENT_PACK where it isn't expected). 
*/ unsubstituted_fn_pack = true; } else if (is_normal_capture_proxy (parm_pack)) { arg_pack = retrieve_local_specialization (parm_pack); if (argument_pack_element_is_expansion_p (arg_pack, 0)) unsubstituted_fn_pack = true; } else { int idx; template_parm_level_and_index (parm_pack, &level, &idx); if (level <= levels) arg_pack = TMPL_ARG (args, level, idx); } orig_arg = arg_pack; if (arg_pack && TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT) arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack); if (arg_pack && !ARGUMENT_PACK_P (arg_pack)) /* This can only happen if we forget to expand an argument pack somewhere else. Just return an error, silently. */ { result = make_tree_vec (1); TREE_VEC_ELT (result, 0) = error_mark_node; return result; } if (arg_pack) { int my_len = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg_pack)); /* Don't bother trying to do a partial substitution with incomplete packs; we'll try again after deduction. */ if (ARGUMENT_PACK_INCOMPLETE_P (arg_pack)) return t; if (len < 0) len = my_len; else if (len != my_len && !unsubstituted_fn_pack) { if (!(complain & tf_error)) /* Fail quietly. */; else if (TREE_CODE (t) == TYPE_PACK_EXPANSION) error ("mismatched argument pack lengths while expanding %qT", pattern); else error ("mismatched argument pack lengths while expanding %qE", pattern); return error_mark_node; } /* Keep track of the parameter packs and their corresponding argument packs. */ packs = tree_cons (parm_pack, arg_pack, packs); TREE_TYPE (packs) = orig_arg; } else { /* We can't substitute for this parameter pack. We use a flag as well as the missing_level counter because function parameter packs don't have a level. */ gcc_assert (processing_template_decl || is_auto (parm_pack)); unsubstituted_packs = true; } } /* If the expansion is just T..., return the matching argument pack, unless we need to call convert_from_reference on all the elements. This is an important optimization; see c++/68422. 
*/ if (!unsubstituted_packs && TREE_PURPOSE (packs) == pattern) { tree args = ARGUMENT_PACK_ARGS (TREE_VALUE (packs)); /* If the argument pack is a single pack expansion, pull it out. */ if (TREE_VEC_LENGTH (args) == 1 && pack_expansion_args_count (args)) return TREE_VEC_ELT (args, 0); /* Types need no adjustment, nor does sizeof..., and if we still have some pack expansion args we won't do anything yet. */ if (TREE_CODE (t) == TYPE_PACK_EXPANSION || PACK_EXPANSION_SIZEOF_P (t) || pack_expansion_args_count (args)) return args; /* Also optimize expression pack expansions if we can tell that the elements won't have reference type. */ tree type = TREE_TYPE (pattern); if (type && TREE_CODE (type) != REFERENCE_TYPE && !PACK_EXPANSION_P (type) && !WILDCARD_TYPE_P (type)) return args; /* Otherwise use the normal path so we get convert_from_reference. */ } /* We cannot expand this expansion expression, because we don't have all of the argument packs we need. */ if (use_pack_expansion_extra_args_p (packs, len, unsubstituted_packs)) { /* We got some full packs, but we can't substitute them in until we have values for all the packs. So remember these until then. */ t = make_pack_expansion (pattern, complain); PACK_EXPANSION_EXTRA_ARGS (t) = build_extra_args (pattern, args, complain); return t; } else if (unsubstituted_packs) { /* There were no real arguments, we're just replacing a parameter pack with another version of itself. Substitute into the pattern and return a PACK_EXPANSION_*. The caller will need to deal with that. 
*/ if (TREE_CODE (t) == EXPR_PACK_EXPANSION) t = tsubst_expr (pattern, args, complain, in_decl, /*integral_constant_expression_p=*/false); else t = tsubst (pattern, args, complain, in_decl); t = make_pack_expansion (t, complain); return t; } gcc_assert (len >= 0); if (need_local_specializations) { /* We're in a late-specified return type, so create our own local specializations map; the current map is either NULL or (in the case of recursive unification) might have bindings that we don't want to use or alter. */ saved_local_specializations = local_specializations; local_specializations = new hash_map<tree, tree>; } /* For each argument in each argument pack, substitute into the pattern. */ result = make_tree_vec (len); tree elem_args = copy_template_args (args); for (i = 0; i < len; ++i) { t = gen_elem_of_pack_expansion_instantiation (pattern, packs, i, elem_args, complain, in_decl); TREE_VEC_ELT (result, i) = t; if (t == error_mark_node) { result = error_mark_node; break; } } /* Update ARGS to restore the substitution from parameter packs to their argument packs. */ for (pack = packs; pack; pack = TREE_CHAIN (pack)) { tree parm = TREE_PURPOSE (pack); if (TREE_CODE (parm) == PARM_DECL || VAR_P (parm) || TREE_CODE (parm) == FIELD_DECL) register_local_specialization (TREE_TYPE (pack), parm); else { int idx, level; if (TREE_VALUE (pack) == NULL_TREE) continue; template_parm_level_and_index (parm, &level, &idx); /* Update the corresponding argument. */ if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args)) TREE_VEC_ELT (TREE_VEC_ELT (args, level -1 ), idx) = TREE_TYPE (pack); else TREE_VEC_ELT (args, idx) = TREE_TYPE (pack); } } if (need_local_specializations) { delete local_specializations; local_specializations = saved_local_specializations; } /* If the dependent pack arguments were such that we end up with only a single pack expansion again, there's no need to keep it in a TREE_VEC. 
*/
  if (len == 1 && TREE_CODE (result) == TREE_VEC
      && PACK_EXPANSION_P (TREE_VEC_ELT (result, 0)))
    return TREE_VEC_ELT (result, 0);

  return result;
}

/* Given PARM_DECL PARM, find the corresponding PARM_DECL in the template
   TMPL.  We do this using DECL_PARM_INDEX, which should work even with
   parameter packs; all parms generated from a function parameter pack will
   have the same DECL_PARM_INDEX.  */

tree
get_pattern_parm (tree parm, tree tmpl)
{
  tree pattern = DECL_TEMPLATE_RESULT (tmpl);
  tree patparm;

  if (DECL_ARTIFICIAL (parm))
    {
      /* Artificial parms (e.g. 'this') have no DECL_PARM_INDEX;
	 match them by name instead.  */
      for (patparm = DECL_ARGUMENTS (pattern);
	   patparm; patparm = DECL_CHAIN (patparm))
	if (DECL_ARTIFICIAL (patparm)
	    && DECL_NAME (parm) == DECL_NAME (patparm))
	  break;
    }
  else
    {
      /* DECL_PARM_INDEX is 1-based, hence the -1 for chain_index.  */
      patparm = FUNCTION_FIRST_USER_PARM (DECL_TEMPLATE_RESULT (tmpl));
      patparm = chain_index (DECL_PARM_INDEX (parm)-1, patparm);
      gcc_assert (DECL_PARM_INDEX (patparm)
		  == DECL_PARM_INDEX (parm));
    }

  return patparm;
}

/* Make an argument pack out of the TREE_VEC VEC.  The pack node kind
   (type vs. non-type) is chosen from the first element.  */

static tree
make_argument_pack (tree vec)
{
  tree pack;
  tree elt = TREE_VEC_ELT (vec, 0);
  if (TYPE_P (elt))
    pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_CONSTANT (pack) = 1;
    }
  SET_ARGUMENT_PACK_ARGS (pack, vec);
  return pack;
}

/* Return an exact copy of template args T that can be modified
   independently.  Nested TREE_VECs (multiple levels of args) are
   copied recursively; other elements are shared, not copied.  */

static tree
copy_template_args (tree t)
{
  if (t == error_mark_node)
    return t;

  int len = TREE_VEC_LENGTH (t);
  tree new_vec = make_tree_vec (len);

  for (int i = 0; i < len; ++i)
    {
      tree elt = TREE_VEC_ELT (t, i);
      if (elt && TREE_CODE (elt) == TREE_VEC)
	elt = copy_template_args (elt);
      TREE_VEC_ELT (new_vec, i) = elt;
    }

  NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_vec)
    = NON_DEFAULT_TEMPLATE_ARGS_COUNT (t);

  return new_vec;
}

/* Substitute ARGS into the vector or list of template arguments T.
*/ static tree tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree orig_t = t; int len, need_new = 0, i, expanded_len_adjust = 0, out; tree *elts; if (t == error_mark_node) return error_mark_node; len = TREE_VEC_LENGTH (t); elts = XALLOCAVEC (tree, len); for (i = 0; i < len; i++) { tree orig_arg = TREE_VEC_ELT (t, i); tree new_arg; if (TREE_CODE (orig_arg) == TREE_VEC) new_arg = tsubst_template_args (orig_arg, args, complain, in_decl); else if (PACK_EXPANSION_P (orig_arg)) { /* Substitute into an expansion expression. */ new_arg = tsubst_pack_expansion (orig_arg, args, complain, in_decl); if (TREE_CODE (new_arg) == TREE_VEC) /* Add to the expanded length adjustment the number of expanded arguments. We subtract one from this measurement, because the argument pack expression itself is already counted as 1 in LEN. EXPANDED_LEN_ADJUST can actually be negative, if the argument pack is empty. */ expanded_len_adjust += TREE_VEC_LENGTH (new_arg) - 1; } else if (ARGUMENT_PACK_P (orig_arg)) { /* Substitute into each of the arguments. */ new_arg = TYPE_P (orig_arg) ? cxx_make_type (TREE_CODE (orig_arg)) : make_node (TREE_CODE (orig_arg)); tree pack_args = tsubst_template_args (ARGUMENT_PACK_ARGS (orig_arg), args, complain, in_decl); if (pack_args == error_mark_node) new_arg = error_mark_node; else SET_ARGUMENT_PACK_ARGS (new_arg, pack_args); if (TREE_CODE (new_arg) == NONTYPE_ARGUMENT_PACK) TREE_CONSTANT (new_arg) = TREE_CONSTANT (orig_arg); } else new_arg = tsubst_template_arg (orig_arg, args, complain, in_decl); if (new_arg == error_mark_node) return error_mark_node; elts[i] = new_arg; if (new_arg != orig_arg) need_new = 1; } if (!need_new) return t; /* Make space for the expanded arguments coming from template argument packs. */ t = make_tree_vec (len + expanded_len_adjust); /* ORIG_T can contain TREE_VECs. That happens if ORIG_T contains the arguments for a member template. 
   In that case each TREE_VEC in ORIG_T represents a level of template
   arguments, and ORIG_T won't carry any non defaulted argument count.
   It will rather be the nested TREE_VECs that will carry one.  In other
   words, ORIG_T carries a non defaulted argument count only if it doesn't
   contain any nested TREE_VEC.  */
      if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t))
	{
	  int count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t);
	  count += expanded_len_adjust;
	  SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (t, count);
	}

      /* Copy ELTS into the new vector T, splicing any expanded packs
	 flat in place of the single slot they occupied in ORIG_T.  */
      for (i = 0, out = 0; i < len; i++)
	{
	  if ((PACK_EXPANSION_P (TREE_VEC_ELT (orig_t, i))
	       || ARGUMENT_PACK_P (TREE_VEC_ELT (orig_t, i)))
	      && TREE_CODE (elts[i]) == TREE_VEC)
	    {
	      int idx;

	      /* Now expand the template argument pack "in place".  */
	      for (idx = 0; idx < TREE_VEC_LENGTH (elts[i]); idx++, out++)
		TREE_VEC_ELT (t, out) = TREE_VEC_ELT (elts[i], idx);
	    }
	  else
	    {
	      TREE_VEC_ELT (t, out) = elts[i];
	      out++;
	    }
	}

      return t;
}

/* Substitute ARGS into one level PARMS of template parameters.
   Error entries are skipped (left as NULL in the result vector).  */

static tree
tsubst_template_parms_level (tree parms, tree args, tsubst_flags_t complain)
{
  if (parms == error_mark_node)
    return error_mark_node;

  tree new_vec = make_tree_vec (TREE_VEC_LENGTH (parms));

  for (int i = 0; i < TREE_VEC_LENGTH (new_vec); ++i)
    {
      tree tuple = TREE_VEC_ELT (parms, i);

      if (tuple == error_mark_node)
	continue;

      TREE_VEC_ELT (new_vec, i) =
	tsubst_template_parm (tuple, args, complain);
    }

  return new_vec;
}

/* Return the result of substituting ARGS into the template parameters
   given by PARMS.  If there are m levels of ARGS and m + n levels of
   PARMS, then the result will contain n levels of PARMS.  For
   example, if PARMS is `template <class T> template <class U>
   template <T*, U, class V>' and ARGS is {{int}, {double}} then the
   result will be `template <int*, double, class V>'.
*/

static tree
tsubst_template_parms (tree parms, tree args, tsubst_flags_t complain)
{
  tree r = NULL_TREE;
  tree* new_parms;

  /* When substituting into a template, we must set
     PROCESSING_TEMPLATE_DECL as the template parameters may be
     dependent if they are based on one-another, and the dependency
     predicates are short-circuit outside of templates.  */
  ++processing_template_decl;

  /* Only the levels beyond the depth of ARGS survive; each surviving
     level is re-substituted and re-numbered relative to ARGS.  */
  for (new_parms = &r;
       parms && TMPL_PARMS_DEPTH (parms) > TMPL_ARGS_DEPTH (args);
       new_parms = &(TREE_CHAIN (*new_parms)),
	 parms = TREE_CHAIN (parms))
    {
      tree new_vec = tsubst_template_parms_level (TREE_VALUE (parms),
						  args, complain);
      *new_parms =
	tree_cons (size_int (TMPL_PARMS_DEPTH (parms)
			     - TMPL_ARGS_DEPTH (args)),
		   new_vec, NULL_TREE);
    }

  --processing_template_decl;

  return r;
}

/* Return the result of substituting ARGS into one template parameter
   given by T. T Must be a TREE_LIST which TREE_VALUE is the template
   parameter and which TREE_PURPOSE is the default argument of the
   template parameter.  */

static tree
tsubst_template_parm (tree t, tree args, tsubst_flags_t complain)
{
  tree default_value, parm_decl;

  if (args == NULL_TREE
      || t == NULL_TREE
      || t == error_mark_node)
    return t;

  gcc_assert (TREE_CODE (t) == TREE_LIST);

  default_value = TREE_PURPOSE (t);
  parm_decl = TREE_VALUE (t);

  parm_decl = tsubst (parm_decl, args, complain, NULL_TREE);
  if (TREE_CODE (parm_decl) == PARM_DECL
      && invalid_nontype_parm_type_p (TREE_TYPE (parm_decl), complain))
    parm_decl = error_mark_node;
  default_value = tsubst_template_arg (default_value, args,
				       complain, NULL_TREE);

  return build_tree_list (default_value, parm_decl);
}

/* Substitute the ARGS into the indicated aggregate (or enumeration)
   type T.  If T is not an aggregate or enumeration type, it is
   handled as if by tsubst.  IN_DECL is as for tsubst.  If
   ENTERING_SCOPE is nonzero, T is the context for a template which
   we are presently tsubst'ing.  Return the substituted value.
*/

static tree
tsubst_aggr_type (tree t,
		  tree args,
		  tsubst_flags_t complain,
		  tree in_decl,
		  int entering_scope)
{
  if (t == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	return tsubst (TYPE_PTRMEMFUNC_FN_TYPE (t), args, complain, in_decl);

      /* Fall through.  */
    case ENUMERAL_TYPE:
    case UNION_TYPE:
      if (TYPE_TEMPLATE_INFO (t) && uses_template_parms (t))
	{
	  tree argvec;
	  tree context;
	  tree r;
	  int saved_unevaluated_operand;
	  int saved_inhibit_evaluation_warnings;

	  /* In "sizeof(X<I>)" we need to evaluate "I".  */
	  saved_unevaluated_operand = cp_unevaluated_operand;
	  cp_unevaluated_operand = 0;
	  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
	  c_inhibit_evaluation_warnings = 0;

	  /* First, determine the context for the type we are looking
	     up.  */
	  context = TYPE_CONTEXT (t);
	  if (context && TYPE_P (context))
	    {
	      context = tsubst_aggr_type (context, args, complain,
					  in_decl, /*entering_scope=*/1);
	      /* If context is a nested class inside a class template,
		 it may still need to be instantiated (c++/33959).  */
	      context = complete_type (context);
	    }

	  /* Then, figure out what arguments are appropriate for the
	     type we are trying to find.  For example, given:

	       template <class T> struct S;
	       template <class T, class U> void f(T, U) { S<U> su; }

	     and supposing that we are instantiating f<int, double>,
	     then our ARGS will be {int, double}, but, when looking up
	     S we only want {double}.  */
	  argvec = tsubst_template_args (TYPE_TI_ARGS (t), args,
					 complain, in_decl);
	  if (argvec == error_mark_node)
	    r = error_mark_node;
	  else
	    {
	      r = lookup_template_class (t, argvec, in_decl, context,
					 entering_scope, complain);
	      /* Re-apply T's cv-qualifiers to the looked-up class.  */
	      r = cp_build_qualified_type_real (r, cp_type_quals (t), complain);
	    }

	  /* Restore the evaluation-context globals saved above.  */
	  cp_unevaluated_operand = saved_unevaluated_operand;
	  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

	  return r;
	}
      else
	/* This is not a template type, so there's nothing to do.
*/ return t; default: return tsubst (t, args, complain, in_decl); } } static GTY((cache)) tree_cache_map *defarg_inst; /* Substitute into the default argument ARG (a default argument for FN), which has the indicated TYPE. */ tree tsubst_default_argument (tree fn, int parmnum, tree type, tree arg, tsubst_flags_t complain) { tree saved_class_ptr = NULL_TREE; tree saved_class_ref = NULL_TREE; int errs = errorcount + sorrycount; /* This can happen in invalid code. */ if (TREE_CODE (arg) == DEFAULT_ARG) return arg; tree parm = FUNCTION_FIRST_USER_PARM (fn); parm = chain_index (parmnum, parm); tree parmtype = TREE_TYPE (parm); if (DECL_BY_REFERENCE (parm)) parmtype = TREE_TYPE (parmtype); if (parmtype == error_mark_node) return error_mark_node; gcc_assert (same_type_ignoring_top_level_qualifiers_p (type, parmtype)); tree *slot; if (defarg_inst && (slot = defarg_inst->get (parm))) return *slot; /* This default argument came from a template. Instantiate the default argument here, not in tsubst. In the case of something like: template <class T> struct S { static T t(); void f(T = t()); }; we must be careful to do name lookup in the scope of S<T>, rather than in the current class. */ push_access_scope (fn); /* The "this" pointer is not valid in a default argument. */ if (cfun) { saved_class_ptr = current_class_ptr; cp_function_chain->x_current_class_ptr = NULL_TREE; saved_class_ref = current_class_ref; cp_function_chain->x_current_class_ref = NULL_TREE; } start_lambda_scope (parm); push_deferring_access_checks(dk_no_deferred); /* The default argument expression may cause implicitly defined member functions to be synthesized, which will result in garbage collection. We must treat this situation as if we were within the body of function so as to avoid collecting live data on the stack. 
*/ ++function_depth; arg = tsubst_expr (arg, DECL_TI_ARGS (fn), complain, NULL_TREE, /*integral_constant_expression_p=*/false); --function_depth; pop_deferring_access_checks(); finish_lambda_scope (); /* Restore the "this" pointer. */ if (cfun) { cp_function_chain->x_current_class_ptr = saved_class_ptr; cp_function_chain->x_current_class_ref = saved_class_ref; } if (errorcount+sorrycount > errs && (complain & tf_warning_or_error)) inform (input_location, " when instantiating default argument for call to %qD", fn); /* Make sure the default argument is reasonable. */ arg = check_default_argument (type, arg, complain); pop_access_scope (fn); if (arg != error_mark_node && !cp_unevaluated_operand) { if (!defarg_inst) defarg_inst = tree_cache_map::create_ggc (37); defarg_inst->put (parm, arg); } return arg; } /* Substitute into all the default arguments for FN. */ static void tsubst_default_arguments (tree fn, tsubst_flags_t complain) { tree arg; tree tmpl_args; tmpl_args = DECL_TI_ARGS (fn); /* If this function is not yet instantiated, we certainly don't need its default arguments. */ if (uses_template_parms (tmpl_args)) return; /* Don't do this again for clones. */ if (DECL_CLONED_FUNCTION_P (fn)) return; int i = 0; for (arg = TYPE_ARG_TYPES (TREE_TYPE (fn)); arg; arg = TREE_CHAIN (arg), ++i) if (TREE_PURPOSE (arg)) TREE_PURPOSE (arg) = tsubst_default_argument (fn, i, TREE_VALUE (arg), TREE_PURPOSE (arg), complain); } /* Subroutine of tsubst_decl for the case when T is a FUNCTION_DECL. */ static tree tsubst_function_decl (tree t, tree args, tsubst_flags_t complain, tree lambda_fntype) { tree gen_tmpl, argvec; hashval_t hash = 0; tree in_decl = t; /* Nobody should be tsubst'ing into non-template functions. */ gcc_assert (DECL_TEMPLATE_INFO (t) != NULL_TREE); if (TREE_CODE (DECL_TI_TEMPLATE (t)) == TEMPLATE_DECL) { /* If T is not dependent, just return it. 
*/ if (!uses_template_parms (DECL_TI_ARGS (t))) return t; /* Calculate the most general template of which R is a specialization. */ gen_tmpl = most_general_template (DECL_TI_TEMPLATE (t)); /* We're substituting a lambda function under tsubst_lambda_expr but not directly from it; find the matching function we're already inside. But don't do this if T is a generic lambda with a single level of template parms, as in that case we're doing a normal instantiation. */ if (LAMBDA_FUNCTION_P (t) && !lambda_fntype && (!generic_lambda_fn_p (t) || TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)) > 1)) return enclosing_instantiation_of (t); /* Calculate the complete set of arguments used to specialize R. */ argvec = tsubst_template_args (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (t))), args, complain, in_decl); if (argvec == error_mark_node) return error_mark_node; /* Check to see if we already have this specialization. */ if (!lambda_fntype) { hash = hash_tmpl_and_args (gen_tmpl, argvec); if (tree spec = retrieve_specialization (gen_tmpl, argvec, hash)) return spec; } /* We can see more levels of arguments than parameters if there was a specialization of a member template, like this: template <class T> struct S { template <class U> void f(); } template <> template <class U> void S<int>::f(U); Here, we'll be substituting into the specialization, because that's where we can find the code we actually want to generate, but we'll have enough arguments for the most general template. We also deal with the peculiar case: template <class T> struct S { template <class U> friend void f(); }; template <class U> void f() {} template S<int>; template void f<double>(); Here, the ARGS for the instantiation of will be {int, double}. But, we only need as many ARGS as there are levels of template parameters in CODE_PATTERN. 
We are careful not to get fooled into reducing the ARGS in situations like: template <class T> struct S { template <class U> void f(U); } template <class T> template <> void S<T>::f(int) {} which we can spot because the pattern will be a specialization in this case. */ int args_depth = TMPL_ARGS_DEPTH (args); int parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (t))); if (args_depth > parms_depth && !DECL_TEMPLATE_SPECIALIZATION (t)) args = get_innermost_template_args (args, parms_depth); } else { /* This special case arises when we have something like this: template <class T> struct S { friend void f<int>(int, double); }; Here, the DECL_TI_TEMPLATE for the friend declaration will be an IDENTIFIER_NODE. We are being called from tsubst_friend_function, and we want only to create a new decl (R) with appropriate types so that we can call determine_specialization. */ gen_tmpl = NULL_TREE; argvec = NULL_TREE; } tree closure = (lambda_fntype ? TYPE_METHOD_BASETYPE (lambda_fntype) : NULL_TREE); tree ctx = closure ? closure : DECL_CONTEXT (t); bool member = ctx && TYPE_P (ctx); if (member && !closure) ctx = tsubst_aggr_type (ctx, args, complain, t, /*entering_scope=*/1); tree type = (lambda_fntype ? lambda_fntype : tsubst (TREE_TYPE (t), args, complain | tf_fndecl_type, in_decl)); if (type == error_mark_node) return error_mark_node; /* If we hit excessive deduction depth, the type is bogus even if it isn't error_mark_node, so don't build a decl. */ if (excessive_deduction_depth) return error_mark_node; /* We do NOT check for matching decls pushed separately at this point, as they may not represent instantiations of this template, and in any case are considered separate under the discrete model. */ tree r = copy_decl (t); DECL_USE_TEMPLATE (r) = 0; TREE_TYPE (r) = type; /* Clear out the mangled name and RTL for the instantiation. */ SET_DECL_ASSEMBLER_NAME (r, NULL_TREE); SET_DECL_RTL (r, NULL); /* Leave DECL_INITIAL set on deleted instantiations. 
*/ if (!DECL_DELETED_FN (r)) DECL_INITIAL (r) = NULL_TREE; DECL_CONTEXT (r) = ctx; /* OpenMP UDRs have the only argument a reference to the declared type. We want to diagnose if the declared type is a reference, which is invalid, but as references to references are usually quietly merged, diagnose it here. */ if (DECL_OMP_DECLARE_REDUCTION_P (t)) { tree argtype = TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t)))); argtype = tsubst (argtype, args, complain, in_decl); if (TREE_CODE (argtype) == REFERENCE_TYPE) error_at (DECL_SOURCE_LOCATION (t), "reference type %qT in " "%<#pragma omp declare reduction%>", argtype); if (strchr (IDENTIFIER_POINTER (DECL_NAME (t)), '~') == NULL) DECL_NAME (r) = omp_reduction_id (ERROR_MARK, DECL_NAME (t), argtype); } if (member && DECL_CONV_FN_P (r)) /* Type-conversion operator. Reconstruct the name, in case it's the name of one of the template's parameters. */ DECL_NAME (r) = make_conv_op_name (TREE_TYPE (type)); tree parms = DECL_ARGUMENTS (t); if (closure) parms = DECL_CHAIN (parms); parms = tsubst (parms, args, complain, t); for (tree parm = parms; parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = r; if (closure) { tree tparm = build_this_parm (r, closure, type_memfn_quals (type)); DECL_CHAIN (tparm) = parms; parms = tparm; } DECL_ARGUMENTS (r) = parms; DECL_RESULT (r) = NULL_TREE; TREE_STATIC (r) = 0; TREE_PUBLIC (r) = TREE_PUBLIC (t); DECL_EXTERNAL (r) = 1; /* If this is an instantiation of a function with internal linkage, we already know what object file linkage will be assigned to the instantiation. */ DECL_INTERFACE_KNOWN (r) = !TREE_PUBLIC (r); DECL_DEFER_OUTPUT (r) = 0; DECL_CHAIN (r) = NULL_TREE; DECL_PENDING_INLINE_INFO (r) = 0; DECL_PENDING_INLINE_P (r) = 0; DECL_SAVED_TREE (r) = NULL_TREE; DECL_STRUCT_FUNCTION (r) = NULL; TREE_USED (r) = 0; /* We'll re-clone as appropriate in instantiate_template. 
*/ DECL_CLONED_FUNCTION (r) = NULL_TREE; /* If we aren't complaining now, return on error before we register the specialization so that we'll complain eventually. */ if ((complain & tf_error) == 0 && IDENTIFIER_ANY_OP_P (DECL_NAME (r)) && !grok_op_properties (r, /*complain=*/false)) return error_mark_node; /* When instantiating a constrained member, substitute into the constraints to create a new constraint. */ if (tree ci = get_constraints (t)) if (member) { ci = tsubst_constraint_info (ci, argvec, complain, NULL_TREE); set_constraints (r, ci); } /* Set up the DECL_TEMPLATE_INFO for R. There's no need to do this in the special friend case mentioned above where GEN_TMPL is NULL. */ if (gen_tmpl && !closure) { DECL_TEMPLATE_INFO (r) = build_template_info (gen_tmpl, argvec); SET_DECL_IMPLICIT_INSTANTIATION (r); tree new_r = register_specialization (r, gen_tmpl, argvec, false, hash); if (new_r != r) /* We instantiated this while substituting into the type earlier (template/friend54.C). */ return new_r; /* We're not supposed to instantiate default arguments until they are called, for a template. But, for a declaration like: template <class T> void f () { extern void g(int i = T()); } we should do the substitution when the template is instantiated. We handle the member function case in instantiate_class_template since the default arguments might refer to other members of the class. */ if (!member && !PRIMARY_TEMPLATE_P (gen_tmpl) && !uses_template_parms (argvec)) tsubst_default_arguments (r, complain); } else DECL_TEMPLATE_INFO (r) = NULL_TREE; /* Copy the list of befriending classes. 
*/ for (tree *friends = &DECL_BEFRIENDING_CLASSES (r); *friends; friends = &TREE_CHAIN (*friends)) { *friends = copy_node (*friends); TREE_VALUE (*friends) = tsubst (TREE_VALUE (*friends), args, complain, in_decl); } if (DECL_CONSTRUCTOR_P (r) || DECL_DESTRUCTOR_P (r)) { maybe_retrofit_in_chrg (r); if (DECL_CONSTRUCTOR_P (r) && !grok_ctor_properties (ctx, r)) return error_mark_node; /* If this is an instantiation of a member template, clone it. If it isn't, that'll be handled by clone_constructors_and_destructors. */ if (PRIMARY_TEMPLATE_P (gen_tmpl)) clone_function_decl (r, /*update_methods=*/false); } else if ((complain & tf_error) != 0 && IDENTIFIER_ANY_OP_P (DECL_NAME (r)) && !grok_op_properties (r, /*complain=*/true)) return error_mark_node; if (DECL_FRIEND_P (t) && DECL_FRIEND_CONTEXT (t)) SET_DECL_FRIEND_CONTEXT (r, tsubst (DECL_FRIEND_CONTEXT (t), args, complain, in_decl)); /* Possibly limit visibility based on template args. */ DECL_VISIBILITY (r) = VISIBILITY_DEFAULT; if (DECL_VISIBILITY_SPECIFIED (t)) { DECL_VISIBILITY_SPECIFIED (r) = 0; DECL_ATTRIBUTES (r) = remove_attribute ("visibility", DECL_ATTRIBUTES (r)); } determine_visibility (r); if (DECL_DEFAULTED_OUTSIDE_CLASS_P (r) && !processing_template_decl) defaulted_late_check (r); apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0, args, complain, in_decl); return r; } /* Subroutine of tsubst_decl for the case when T is a TEMPLATE_DECL. */ static tree tsubst_template_decl (tree t, tree args, tsubst_flags_t complain, tree lambda_fntype) { /* We can get here when processing a member function template, member class template, or template template parameter. */ tree decl = DECL_TEMPLATE_RESULT (t); tree in_decl = t; tree spec; tree tmpl_args; tree full_args; tree r; hashval_t hash = 0; if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) { /* Template template parameter is treated here. 
*/ tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (new_type == error_mark_node) r = error_mark_node; /* If we get a real template back, return it. This can happen in the context of most_specialized_partial_spec. */ else if (TREE_CODE (new_type) == TEMPLATE_DECL) r = new_type; else /* The new TEMPLATE_DECL was built in reduce_template_parm_level. */ r = TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (new_type); return r; } if (!lambda_fntype) { /* We might already have an instance of this template. The ARGS are for the surrounding class type, so the full args contain the tsubst'd args for the context, plus the innermost args from the template decl. */ tmpl_args = DECL_CLASS_TEMPLATE_P (t) ? CLASSTYPE_TI_ARGS (TREE_TYPE (t)) : DECL_TI_ARGS (DECL_TEMPLATE_RESULT (t)); /* Because this is a template, the arguments will still be dependent, even after substitution. If PROCESSING_TEMPLATE_DECL is not set, the dependency predicates will short-circuit. */ ++processing_template_decl; full_args = tsubst_template_args (tmpl_args, args, complain, in_decl); --processing_template_decl; if (full_args == error_mark_node) return error_mark_node; /* If this is a default template template argument, tsubst might not have changed anything. */ if (full_args == tmpl_args) return t; hash = hash_tmpl_and_args (t, full_args); spec = retrieve_specialization (t, full_args, hash); if (spec != NULL_TREE) return spec; } /* Make a new template decl. It will be similar to the original, but will record the current template arguments. We also create a new function declaration, which is just like the old one, but points to this new template, rather than the old one. */ r = copy_decl (t); gcc_assert (DECL_LANG_SPECIFIC (r) != 0); DECL_CHAIN (r) = NULL_TREE; // Build new template info linking to the original template decl. 
if (!lambda_fntype) { DECL_TEMPLATE_INFO (r) = build_template_info (t, args); SET_DECL_IMPLICIT_INSTANTIATION (r); } else DECL_TEMPLATE_INFO (r) = NULL_TREE; /* The template parameters for this new template are all the template parameters for the old template, except the outermost level of parameters. */ DECL_TEMPLATE_PARMS (r) = tsubst_template_parms (DECL_TEMPLATE_PARMS (t), args, complain); if (TREE_CODE (decl) == TYPE_DECL && !TYPE_DECL_ALIAS_P (decl)) { tree new_type; ++processing_template_decl; new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); --processing_template_decl; if (new_type == error_mark_node) return error_mark_node; TREE_TYPE (r) = new_type; /* For a partial specialization, we need to keep pointing to the primary template. */ if (!DECL_TEMPLATE_SPECIALIZATION (t)) CLASSTYPE_TI_TEMPLATE (new_type) = r; DECL_TEMPLATE_RESULT (r) = TYPE_MAIN_DECL (new_type); DECL_TI_ARGS (r) = CLASSTYPE_TI_ARGS (new_type); DECL_CONTEXT (r) = TYPE_CONTEXT (new_type); } else { tree new_decl; ++processing_template_decl; if (TREE_CODE (decl) == FUNCTION_DECL) new_decl = tsubst_function_decl (decl, args, complain, lambda_fntype); else new_decl = tsubst (decl, args, complain, in_decl); --processing_template_decl; if (new_decl == error_mark_node) return error_mark_node; DECL_TEMPLATE_RESULT (r) = new_decl; TREE_TYPE (r) = TREE_TYPE (new_decl); DECL_CONTEXT (r) = DECL_CONTEXT (new_decl); if (lambda_fntype) { tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (r)); DECL_TEMPLATE_INFO (new_decl) = build_template_info (r, args); } else { DECL_TI_TEMPLATE (new_decl) = r; DECL_TI_ARGS (r) = DECL_TI_ARGS (new_decl); } } DECL_TEMPLATE_INSTANTIATIONS (r) = NULL_TREE; DECL_TEMPLATE_SPECIALIZATIONS (r) = NULL_TREE; if (PRIMARY_TEMPLATE_P (t)) DECL_PRIMARY_TEMPLATE (r) = r; if (TREE_CODE (decl) != TYPE_DECL && !VAR_P (decl) && !lambda_fntype) /* Record this non-type partial instantiation. 
   */
  /* Register this partial instantiation so later lookups of T with these
     args find R instead of re-substituting.  */
  register_specialization (r, t,
			   DECL_TI_ARGS (DECL_TEMPLATE_RESULT (r)),
			   false, hash);
  return r;
}

/* True if FN is the op() for a lambda in an uninstantiated template.  */

bool
lambda_fn_in_template_p (tree fn)
{
  if (!fn || !LAMBDA_FUNCTION_P (fn))
    return false;
  /* A lambda's op() lives in its closure class; the lambda is still in a
     template iff that closure type carries template info.  */
  tree closure = DECL_CONTEXT (fn);
  return CLASSTYPE_TEMPLATE_INFO (closure) != NULL_TREE;
}

/* We're instantiating a variable from template function TCTX.  Return the
   corresponding current enclosing scope.  This gets complicated because
   lambda functions in templates are regenerated rather than instantiated,
   but generic lambda functions are subsequently instantiated.  */

static tree
enclosing_instantiation_of (tree otctx)
{
  tree tctx = otctx;
  tree fn = current_function_decl;
  int lambda_count = 0;

  /* Strip template-lambda op()s from TCTX, counting how many we skip;
     regenerated lambdas have no direct instantiation to match against.  */
  for (; tctx && lambda_fn_in_template_p (tctx);
       tctx = decl_function_context (tctx))
    ++lambda_count;
  /* Walk outward from the current function, skipping the same number of
     lambda op()s, until we find the function whose most general template
     is TCTX's (or that is TCTX itself, for non-template FN).  */
  for (; fn; fn = decl_function_context (fn))
    {
      tree ofn = fn;
      int flambda_count = 0;
      for (; flambda_count < lambda_count && fn && LAMBDA_FUNCTION_P (fn);
	   fn = decl_function_context (fn))
	++flambda_count;
      if ((fn && DECL_TEMPLATE_INFO (fn))
	  ? most_general_template (fn) != most_general_template (tctx)
	  : fn != tctx)
	continue;
      gcc_assert (DECL_NAME (ofn) == DECL_NAME (otctx)
		  || DECL_CONV_FN_P (ofn));
      return ofn;
    }
  /* The caller guarantees we are inside an instantiation of OTCTX.  */
  gcc_unreachable ();
}

/* Substitute the ARGS into the T, which is a _DECL.  Return the
   result of the substitution.  Issue error and warning messages under
   control of COMPLAIN.  */

static tree
tsubst_decl (tree t, tree args, tsubst_flags_t complain)
{
  /* All exits below funnel through OUT so input_location is restored.  */
#define RETURN(EXP) do { r = (EXP); goto out; } while(0)
  location_t saved_loc;
  tree r = NULL_TREE;
  tree in_decl = t;
  hashval_t hash = 0;

  /* Set the filename and linenumber to improve error-reporting.
*/ saved_loc = input_location; input_location = DECL_SOURCE_LOCATION (t); switch (TREE_CODE (t)) { case TEMPLATE_DECL: r = tsubst_template_decl (t, args, complain, /*lambda*/NULL_TREE); break; case FUNCTION_DECL: r = tsubst_function_decl (t, args, complain, /*lambda*/NULL_TREE); break; case PARM_DECL: { tree type = NULL_TREE; int i, len = 1; tree expanded_types = NULL_TREE; tree prev_r = NULL_TREE; tree first_r = NULL_TREE; if (DECL_PACK_P (t)) { /* If there is a local specialization that isn't a parameter pack, it means that we're doing a "simple" substitution from inside tsubst_pack_expansion. Just return the local specialization (which will be a single parm). */ tree spec = retrieve_local_specialization (t); if (spec && TREE_CODE (spec) == PARM_DECL && TREE_CODE (TREE_TYPE (spec)) != TYPE_PACK_EXPANSION) RETURN (spec); /* Expand the TYPE_PACK_EXPANSION that provides the types for the parameters in this function parameter pack. */ expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args, complain, in_decl); if (TREE_CODE (expanded_types) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded_types); /* Zero-length parameter packs are boring. Just substitute into the chain. */ if (len == 0) RETURN (tsubst (TREE_CHAIN (t), args, complain, TREE_CHAIN (t))); } else { /* All we did was update the type. Make a note of that. */ type = expanded_types; expanded_types = NULL_TREE; } } /* Loop through all of the parameters we'll build. When T is a function parameter pack, LEN is the number of expanded types in EXPANDED_TYPES; otherwise, LEN is 1. */ r = NULL_TREE; for (i = 0; i < len; ++i) { prev_r = r; r = copy_node (t); if (DECL_TEMPLATE_PARM_P (t)) SET_DECL_TEMPLATE_PARM_P (r); if (expanded_types) /* We're on the Ith parameter of the function parameter pack. */ { /* Get the Ith type. */ type = TREE_VEC_ELT (expanded_types, i); /* Rename the parameter to include the index. 
*/ DECL_NAME (r) = make_ith_pack_parameter_name (DECL_NAME (r), i); } else if (!type) /* We're dealing with a normal parameter. */ type = tsubst (TREE_TYPE (t), args, complain, in_decl); type = type_decays_to (type); TREE_TYPE (r) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), r); if (DECL_INITIAL (r)) { if (TREE_CODE (DECL_INITIAL (r)) != TEMPLATE_PARM_INDEX) DECL_INITIAL (r) = TREE_TYPE (r); else DECL_INITIAL (r) = tsubst (DECL_INITIAL (r), args, complain, in_decl); } DECL_CONTEXT (r) = NULL_TREE; if (!DECL_TEMPLATE_PARM_P (r)) DECL_ARG_TYPE (r) = type_passed_as (type); apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0, args, complain, in_decl); /* Keep track of the first new parameter we generate. That's what will be returned to the caller. */ if (!first_r) first_r = r; /* Build a proper chain of parameters when substituting into a function parameter pack. */ if (prev_r) DECL_CHAIN (prev_r) = r; } /* If cp_unevaluated_operand is set, we're just looking for a single dummy parameter, so don't keep going. */ if (DECL_CHAIN (t) && !cp_unevaluated_operand) DECL_CHAIN (r) = tsubst (DECL_CHAIN (t), args, complain, DECL_CHAIN (t)); /* FIRST_R contains the start of the chain we've built. */ r = first_r; } break; case FIELD_DECL: { tree type = NULL_TREE; tree vec = NULL_TREE; tree expanded_types = NULL_TREE; int len = 1; if (PACK_EXPANSION_P (TREE_TYPE (t))) { /* This field is a lambda capture pack. Return a TREE_VEC of the expanded fields to instantiate_class_template_1. */ expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args, complain, in_decl); if (TREE_CODE (expanded_types) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded_types); vec = make_tree_vec (len); } else { /* All we did was update the type. Make a note of that. 
*/ type = expanded_types; expanded_types = NULL_TREE; } } for (int i = 0; i < len; ++i) { r = copy_decl (t); if (expanded_types) { type = TREE_VEC_ELT (expanded_types, i); DECL_NAME (r) = make_ith_pack_parameter_name (DECL_NAME (r), i); } else if (!type) type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (type == error_mark_node) RETURN (error_mark_node); TREE_TYPE (r) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), r); if (DECL_C_BIT_FIELD (r)) /* For bit-fields, DECL_BIT_FIELD_REPRESENTATIVE gives the number of bits. */ DECL_BIT_FIELD_REPRESENTATIVE (r) = tsubst_expr (DECL_BIT_FIELD_REPRESENTATIVE (t), args, complain, in_decl, /*integral_constant_expression_p=*/true); if (DECL_INITIAL (t)) { /* Set up DECL_TEMPLATE_INFO so that we can get at the NSDMI in perform_member_init. Still set DECL_INITIAL so that we know there is one. */ DECL_INITIAL (r) = void_node; gcc_assert (DECL_LANG_SPECIFIC (r) == NULL); retrofit_lang_decl (r); DECL_TEMPLATE_INFO (r) = build_template_info (t, args); } /* We don't have to set DECL_CONTEXT here; it is set by finish_member_declaration. */ DECL_CHAIN (r) = NULL_TREE; apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0, args, complain, in_decl); if (vec) TREE_VEC_ELT (vec, i) = r; } if (vec) r = vec; } break; case USING_DECL: /* We reach here only for member using decls. We also need to check uses_template_parms because DECL_DEPENDENT_P is not set for a using-declaration that designates a member of the current instantiation (c++/53549). 
*/ if (DECL_DEPENDENT_P (t) || uses_template_parms (USING_DECL_SCOPE (t))) { tree scope = USING_DECL_SCOPE (t); tree name = tsubst_copy (DECL_NAME (t), args, complain, in_decl); if (PACK_EXPANSION_P (scope)) { tree vec = tsubst_pack_expansion (scope, args, complain, in_decl); int len = TREE_VEC_LENGTH (vec); r = make_tree_vec (len); for (int i = 0; i < len; ++i) { tree escope = TREE_VEC_ELT (vec, i); tree elt = do_class_using_decl (escope, name); if (!elt) { r = error_mark_node; break; } else { TREE_PROTECTED (elt) = TREE_PROTECTED (t); TREE_PRIVATE (elt) = TREE_PRIVATE (t); } TREE_VEC_ELT (r, i) = elt; } } else { tree inst_scope = tsubst_copy (USING_DECL_SCOPE (t), args, complain, in_decl); r = do_class_using_decl (inst_scope, name); if (!r) r = error_mark_node; else { TREE_PROTECTED (r) = TREE_PROTECTED (t); TREE_PRIVATE (r) = TREE_PRIVATE (t); } } } else { r = copy_node (t); DECL_CHAIN (r) = NULL_TREE; } break; case TYPE_DECL: case VAR_DECL: { tree argvec = NULL_TREE; tree gen_tmpl = NULL_TREE; tree spec; tree tmpl = NULL_TREE; tree ctx; tree type = NULL_TREE; bool local_p; if (TREE_TYPE (t) == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (t) == TYPE_DECL && t == TYPE_MAIN_DECL (TREE_TYPE (t))) { /* If this is the canonical decl, we don't have to mess with instantiations, and often we can't (for typename, template type parms and such). Note that TYPE_NAME is not correct for the above test if we've copied the type for a typedef. */ type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (type == error_mark_node) RETURN (error_mark_node); r = TYPE_NAME (type); break; } /* Check to see if we already have the specialization we need. */ spec = NULL_TREE; if (DECL_CLASS_SCOPE_P (t) || DECL_NAMESPACE_SCOPE_P (t)) { /* T is a static data member or namespace-scope entity. 
We have to substitute into namespace-scope variables (not just variable templates) because of cases like: template <class T> void f() { extern T t; } where the entity referenced is not known until instantiation time. */ local_p = false; ctx = DECL_CONTEXT (t); if (DECL_CLASS_SCOPE_P (t)) { ctx = tsubst_aggr_type (ctx, args, complain, in_decl, /*entering_scope=*/1); /* If CTX is unchanged, then T is in fact the specialization we want. That situation occurs when referencing a static data member within in its own class. We can use pointer equality, rather than same_type_p, because DECL_CONTEXT is always canonical... */ if (ctx == DECL_CONTEXT (t) /* ... unless T is a member template; in which case our caller can be willing to create a specialization of that template represented by T. */ && !(DECL_TI_TEMPLATE (t) && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (t)))) spec = t; } if (!spec) { tmpl = DECL_TI_TEMPLATE (t); gen_tmpl = most_general_template (tmpl); argvec = tsubst (DECL_TI_ARGS (t), args, complain, in_decl); if (argvec != error_mark_node) argvec = (coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (gen_tmpl), argvec, t, complain, /*all*/true, /*defarg*/true)); if (argvec == error_mark_node) RETURN (error_mark_node); hash = hash_tmpl_and_args (gen_tmpl, argvec); spec = retrieve_specialization (gen_tmpl, argvec, hash); } } else { /* A local variable. */ local_p = true; /* Subsequent calls to pushdecl will fill this in. */ ctx = NULL_TREE; /* Unless this is a reference to a static variable from an enclosing function, in which case we need to fill it in now. */ if (TREE_STATIC (t)) { tree fn = enclosing_instantiation_of (DECL_CONTEXT (t)); if (fn != current_function_decl) ctx = fn; } spec = retrieve_local_specialization (t); } /* If we already have the specialization we need, there is nothing more to do. */ if (spec) { r = spec; break; } /* Create a new node for the specialization we need. 
*/ r = copy_decl (t); if (type == NULL_TREE) { if (is_typedef_decl (t)) type = DECL_ORIGINAL_TYPE (t); else type = TREE_TYPE (t); if (VAR_P (t) && VAR_HAD_UNKNOWN_BOUND (t) && type != error_mark_node) type = strip_array_domain (type); tree sub_args = args; if (tree auto_node = type_uses_auto (type)) { /* Mask off any template args past the variable's context so we don't replace the auto with an unrelated argument. */ int nouter = TEMPLATE_TYPE_LEVEL (auto_node) - 1; int extra = TMPL_ARGS_DEPTH (args) - nouter; if (extra > 0) /* This should never happen with the new lambda instantiation model, but keep the handling just in case. */ gcc_assert (!CHECKING_P), sub_args = strip_innermost_template_args (args, extra); } type = tsubst (type, sub_args, complain, in_decl); } if (VAR_P (r)) { /* Even if the original location is out of scope, the newly substituted one is not. */ DECL_DEAD_FOR_LOCAL (r) = 0; DECL_INITIALIZED_P (r) = 0; DECL_TEMPLATE_INSTANTIATED (r) = 0; if (type == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (type) == FUNCTION_TYPE) { /* It may seem that this case cannot occur, since: typedef void f(); void g() { f x; } declares a function, not a variable. However: typedef void f(); template <typename T> void g() { T t; } template void g<f>(); is an attempt to declare a variable with function type. */ error ("variable %qD has function type", /* R is not yet sufficiently initialized, so we just use its name. */ DECL_NAME (r)); RETURN (error_mark_node); } type = complete_type (type); /* Wait until cp_finish_decl to set this again, to handle circular dependency (template/instantiate6.C). 
*/ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = 0; type = check_var_type (DECL_NAME (r), type); if (DECL_HAS_VALUE_EXPR_P (t)) { tree ve = DECL_VALUE_EXPR (t); ve = tsubst_expr (ve, args, complain, in_decl, /*constant_expression_p=*/false); if (REFERENCE_REF_P (ve)) { gcc_assert (TREE_CODE (type) == REFERENCE_TYPE); ve = TREE_OPERAND (ve, 0); } SET_DECL_VALUE_EXPR (r, ve); } if (CP_DECL_THREAD_LOCAL_P (r) && !processing_template_decl) set_decl_tls_model (r, decl_default_tls_model (r)); } else if (DECL_SELF_REFERENCE_P (t)) SET_DECL_SELF_REFERENCE_P (r); TREE_TYPE (r) = type; cp_apply_type_quals_to_decl (cp_type_quals (type), r); DECL_CONTEXT (r) = ctx; /* Clear out the mangled name and RTL for the instantiation. */ SET_DECL_ASSEMBLER_NAME (r, NULL_TREE); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL)) SET_DECL_RTL (r, NULL); /* The initializer must not be expanded until it is required; see [temp.inst]. */ DECL_INITIAL (r) = NULL_TREE; DECL_SIZE (r) = DECL_SIZE_UNIT (r) = 0; if (VAR_P (r)) { if (DECL_LANG_SPECIFIC (r)) SET_DECL_DEPENDENT_INIT_P (r, false); SET_DECL_MODE (r, VOIDmode); /* Possibly limit visibility based on template args. */ DECL_VISIBILITY (r) = VISIBILITY_DEFAULT; if (DECL_VISIBILITY_SPECIFIED (t)) { DECL_VISIBILITY_SPECIFIED (r) = 0; DECL_ATTRIBUTES (r) = remove_attribute ("visibility", DECL_ATTRIBUTES (r)); } determine_visibility (r); } if (!local_p) { /* A static data member declaration is always marked external when it is declared in-class, even if an initializer is present. We mimic the non-template processing here. 
	     */
	  DECL_EXTERNAL (r) = 1;
	  if (DECL_NAMESPACE_SCOPE_P (t))
	    DECL_NOT_REALLY_EXTERN (r) = 1;

	  DECL_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec);
	  SET_DECL_IMPLICIT_INSTANTIATION (r);
	  /* Remember this specialization so we reuse R on later lookups.  */
	  register_specialization (r, gen_tmpl, argvec, false, hash);
	}
      else
	{
	  if (DECL_LANG_SPECIFIC (r))
	    DECL_TEMPLATE_INFO (r) = NULL_TREE;
	  /* Local variables are recorded per enclosing-function
	     instantiation rather than globally.  */
	  if (!cp_unevaluated_operand)
	    register_local_specialization (r, t);
	}

      DECL_CHAIN (r) = NULL_TREE;

      apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), /*flags=*/0,
				      args, complain, in_decl);

      /* Preserve a typedef that names a type.  */
      if (is_typedef_decl (r) && type != error_mark_node)
	{
	  DECL_ORIGINAL_TYPE (r) = NULL_TREE;
	  set_underlying_type (r);
	  if (TYPE_DECL_ALIAS_P (r))
	    /* An alias template specialization can be dependent
	       even if its underlying type is not. */
	    TYPE_DEPENDENT_P_VALID (TREE_TYPE (r)) = false;
	}

      layout_decl (r, 0);
    }
    break;

    default:
      gcc_unreachable ();
    }
#undef RETURN

 out:
  /* Restore the file and line information.  */
  input_location = saved_loc;

  return r;
}

/* Substitute into the ARG_TYPES of a function type.
   If END is a TREE_CHAIN, leave it and any following types
   un-substituted.  */

static tree
tsubst_arg_types (tree arg_types,
		  tree args,
		  tree end,
		  tsubst_flags_t complain,
		  tree in_decl)
{
  tree remaining_arg_types;
  tree type = NULL_TREE;
  int i = 1;
  tree expanded_args = NULL_TREE;
  tree default_arg;

  if (!arg_types || arg_types == void_list_node || arg_types == end)
    return arg_types;

  /* Recurse on the tail first so the substituted list can be built by
     consing onto the (already-substituted) remainder.  */
  remaining_arg_types = tsubst_arg_types (TREE_CHAIN (arg_types),
					  args, end, complain, in_decl);
  if (remaining_arg_types == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (TREE_VALUE (arg_types)))
    {
      /* For a pack expansion, perform substitution on the
         entire expression. Later on, we'll handle the arguments
         one-by-one.  */
      expanded_args = tsubst_pack_expansion (TREE_VALUE (arg_types),
					     args, complain, in_decl);

      if (TREE_CODE (expanded_args) == TREE_VEC)
	/* So that we'll spin through the parameters, one by one.
	 */
	i = TREE_VEC_LENGTH (expanded_args);
      else
	{
	  /* We only partially substituted into the parameter
	     pack. Our type is TYPE_PACK_EXPANSION.  */
	  type = expanded_args;
	  expanded_args = NULL_TREE;
	}
    }

  /* Emit one parameter per expanded pack element (I counts down so the
     resulting list keeps the original left-to-right order).  */
  while (i > 0)
    {
      --i;

      if (expanded_args)
	type = TREE_VEC_ELT (expanded_args, i);
      else if (!type)
	type = tsubst (TREE_VALUE (arg_types), args, complain, in_decl);

      if (type == error_mark_node)
	return error_mark_node;
      if (VOID_TYPE_P (type))
	{
	  if (complain & tf_error)
	    {
	      error ("invalid parameter type %qT", type);
	      if (in_decl)
		error ("in declaration %q+D", in_decl);
	    }
	  return error_mark_node;
	}
      /* DR 657. */
      if (abstract_virtuals_error_sfinae (ACU_PARM, type, complain))
	return error_mark_node;

      /* Do array-to-pointer, function-to-pointer conversion, and ignore
	 top-level qualifiers as required.  */
      type = cv_unqualified (type_decays_to (type));

      /* We do not substitute into default arguments here.  The standard
	 mandates that they be instantiated only when needed, which is
	 done in build_over_call.  */
      default_arg = TREE_PURPOSE (arg_types);

      /* Except that we do substitute default arguments under
	 tsubst_lambda_expr, since the new op() won't have any
	 associated template arguments for us to refer to later.  */
      if (lambda_fn_in_template_p (in_decl))
	default_arg = tsubst_copy_and_build (default_arg, args, complain,
					     in_decl,
					     false/*fn*/, false/*constexpr*/);

      if (default_arg && TREE_CODE (default_arg) == DEFAULT_ARG)
	{
	  /* We've instantiated a template before its default arguments
	     have been parsed.  This can happen for a nested template
	     class, and is not an error unless we require the default
	     argument in a call of this function.  */
	  /* Use plain tree_cons (not hash_tree_cons) so this node stays
	     unique; it is recorded for later default-arg instantiation.  */
	  remaining_arg_types
	    = tree_cons (default_arg, type, remaining_arg_types);
	  vec_safe_push (DEFARG_INSTANTIATIONS(default_arg), remaining_arg_types);
	}
      else
	remaining_arg_types
	  = hash_tree_cons (default_arg, type, remaining_arg_types);
    }

  return remaining_arg_types;
}

/* Substitute into a FUNCTION_TYPE or METHOD_TYPE.
   This routine does *not* handle the exception-specification for FNTYPE,
   because the initial substitution of explicitly provided template
   parameters during argument deduction forbids substitution into the
   exception-specification:

     [temp.deduct]

     All references in the function type of the function template to the
     corresponding template parameters are replaced by the specified tem-
     plate argument values.  If a substitution in a template parameter or
     in the function type of the function template results in an invalid
     type, type deduction fails.  [Note: The equivalent substitution in
     exception specifications is done only when the function is instanti-
     ated, at which point a program is ill-formed if the substitution
     results in an invalid type.]  */

static tree
tsubst_function_type (tree t,
		      tree args,
		      tsubst_flags_t complain,
		      tree in_decl)
{
  tree return_type;
  tree arg_types = NULL_TREE;
  tree fntype;

  /* The TYPE_CONTEXT is not used for function/method types.  */
  gcc_assert (TYPE_CONTEXT (t) == NULL_TREE);

  /* DR 1227: Mixing immediate and non-immediate contexts in deduction
     failure.  */
  /* With a late (trailing) return type we must substitute the argument
     types first, so 'this' and the parameters are available when the
     return type is substituted; otherwise the return type goes first.  */
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t);

  if (late_return_type_p)
    {
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
				    complain, in_decl);
      if (arg_types == error_mark_node)
	return error_mark_node;

      /* Save/restore the globals that inject_this_parameter clobbers.  */
      tree save_ccp = current_class_ptr;
      tree save_ccr = current_class_ref;
      tree this_type = (TREE_CODE (t) == METHOD_TYPE
			? TREE_TYPE (TREE_VALUE (arg_types)) : NULL_TREE);
      bool do_inject = this_type && CLASS_TYPE_P (this_type);
      if (do_inject)
	{
	  /* DR 1207: 'this' is in scope in the trailing return type.  */
	  inject_this_parameter (this_type, cp_type_quals (this_type));
	}

      /* Substitute the return type.  */
      return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

      if (do_inject)
	{
	  current_class_ptr = save_ccp;
	  current_class_ref = save_ccr;
	}
    }
  else
    /* Substitute the return type.
     */
    return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

  if (return_type == error_mark_node)
    return error_mark_node;
  /* DR 486 clarifies that creation of a function type with an
     invalid return type is a deduction failure.  */
  if (TREE_CODE (return_type) == ARRAY_TYPE
      || TREE_CODE (return_type) == FUNCTION_TYPE)
    {
      if (complain & tf_error)
	{
	  if (TREE_CODE (return_type) == ARRAY_TYPE)
	    error ("function returning an array");
	  else
	    error ("function returning a function");
	}
      return error_mark_node;
    }
  /* And DR 657. */
  if (abstract_virtuals_error_sfinae (ACU_RETURN, return_type, complain))
    return error_mark_node;

  if (!late_return_type_p)
    {
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
				    complain, in_decl);
      if (arg_types == error_mark_node)
	return error_mark_node;
    }

  /* Construct a new type node and return it.  */
  if (TREE_CODE (t) == FUNCTION_TYPE)
    {
      fntype = build_function_type (return_type, arg_types);
      fntype = apply_memfn_quals (fntype,
				  type_memfn_quals (t),
				  type_memfn_rqual (t));
    }
  else
    {
      /* METHOD_TYPE: the first "argument" is the implicit object type.  */
      tree r = TREE_TYPE (TREE_VALUE (arg_types));
      /* Don't pick up extra function qualifiers from the basetype.  */
      r = cp_build_qualified_type_real (r, type_memfn_quals (t), complain);
      if (! MAYBE_CLASS_TYPE_P (r))
	{
	  /* [temp.deduct]

	     Type deduction may fail for any of the following
	     reasons:

	     -- Attempting to create "pointer to member of T" when T
	     is not a class type.  */
	  if (complain & tf_error)
	    error ("creating pointer to member function of non-class type %qT",
		      r);
	  return error_mark_node;
	}

      fntype = build_method_type_directly (r, return_type,
					   TREE_CHAIN (arg_types));
      fntype = build_ref_qualified_type (fntype, type_memfn_rqual (t));
    }
  fntype = cp_build_type_attribute_variant (fntype, TYPE_ATTRIBUTES (t));

  if (late_return_type_p)
    TYPE_HAS_LATE_RETURN_TYPE (fntype) = 1;

  return fntype;
}

/* FNTYPE is a FUNCTION_TYPE or METHOD_TYPE.  Substitute the template
   ARGS into that specification, and return the substituted
   specification.
   If there is no specification, return NULL_TREE.  */

static tree
tsubst_exception_specification (tree fntype,
				tree args,
				tsubst_flags_t complain,
				tree in_decl,
				bool defer_ok)
{
  tree specs;
  tree new_specs;

  specs = TYPE_RAISES_EXCEPTIONS (fntype);
  new_specs = NULL_TREE;
  if (specs && TREE_PURPOSE (specs))
    {
      /* A noexcept-specifier.  */
      tree expr = TREE_PURPOSE (specs);
      if (TREE_CODE (expr) == INTEGER_CST)
	/* Already a constant (noexcept(true/false)); nothing to do.  */
	new_specs = expr;
      else if (defer_ok)
	{
	  /* Defer instantiation of noexcept-specifiers to avoid
	     excessive instantiations (c++/49107).  */
	  new_specs = make_node (DEFERRED_NOEXCEPT);
	  if (DEFERRED_NOEXCEPT_SPEC_P (specs))
	    {
	      /* We already partially instantiated this member template,
		 so combine the new args with the old.  */
	      DEFERRED_NOEXCEPT_PATTERN (new_specs)
		= DEFERRED_NOEXCEPT_PATTERN (expr);
	      DEFERRED_NOEXCEPT_ARGS (new_specs)
		= add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr), args);
	    }
	  else
	    {
	      DEFERRED_NOEXCEPT_PATTERN (new_specs) = expr;
	      DEFERRED_NOEXCEPT_ARGS (new_specs) = args;
	    }
	}
      else
	/* Substitute into the noexcept operand right now.  */
	new_specs = tsubst_copy_and_build
	  (expr, args, complain, in_decl, /*function_p=*/false,
	   /*integral_constant_expression_p=*/true);
      new_specs = build_noexcept_spec (new_specs, complain);
    }
  else if (specs)
    {
      /* A dynamic exception-specification: throw(T...).  An empty
	 TREE_VALUE means throw(); keep it as-is.  */
      if (! TREE_VALUE (specs))
	new_specs = specs;
      else
	while (specs)
	  {
	    tree spec;
            int i, len = 1;
            tree expanded_specs = NULL_TREE;

            if (PACK_EXPANSION_P (TREE_VALUE (specs)))
              {
                /* Expand the pack expansion type.  */
                expanded_specs = tsubst_pack_expansion (TREE_VALUE (specs),
                                                       args, complain,
                                                       in_decl);

		if (expanded_specs == error_mark_node)
		  return error_mark_node;
		else if (TREE_CODE (expanded_specs) == TREE_VEC)
		  len = TREE_VEC_LENGTH (expanded_specs);
		else
		  {
		    /* We're substituting into a member template, so
		       we got a TYPE_PACK_EXPANSION back.  Add that
		       expansion and move on.
		     */
		    gcc_assert (TREE_CODE (expanded_specs)
				== TYPE_PACK_EXPANSION);
		    new_specs = add_exception_specifier (new_specs,
							 expanded_specs,
							 complain);
		    specs = TREE_CHAIN (specs);
		    continue;
		  }
              }

	    /* Add each (possibly pack-expanded) specifier in turn.  */
	    for (i = 0; i < len; ++i)
	      {
		if (expanded_specs)
		  spec = TREE_VEC_ELT (expanded_specs, i);
		else
		  spec = tsubst (TREE_VALUE (specs), args, complain, in_decl);
		if (spec == error_mark_node)
		  return spec;
		new_specs = add_exception_specifier (new_specs, spec,
						     complain);
	      }

	    specs = TREE_CHAIN (specs);
	  }
    }
  return new_specs;
}

/* Take the tree structure T and replace template parameters used
   therein with the argument vector ARGS.  IN_DECL is an associated
   decl for diagnostics.  If an error occurs, returns ERROR_MARK_NODE.
   Issue error and warning messages under control of COMPLAIN.  Note
   that we must be relatively non-tolerant of extensions here, in
   order to preserve conformance; if we allow substitutions that
   should not be allowed, we may allow argument deductions that should
   not succeed, and therefore report ambiguous overload situations
   where there are none.  In theory, we could allow the substitution,
   but indicate that it should have failed, and allow our caller to
   make sure that the right thing happens, but we don't try to do this
   yet.

   This function is used for dealing with types, decls and the like;
   for expressions, use tsubst_expr or tsubst_copy.  */

tree
tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  enum tree_code code;
  tree type, r = NULL_TREE;

  /* Trivial cases: shared singleton nodes and scopes never contain
     template parameters, so substitution is the identity.  */
  if (t == NULL_TREE || t == error_mark_node
      || t == integer_type_node
      || t == void_type_node
      || t == char_type_node
      || t == unknown_type_node
      || TREE_CODE (t) == NAMESPACE_DECL
      || TREE_CODE (t) == TRANSLATION_UNIT_DECL)
    return t;

  if (DECL_P (t))
    return tsubst_decl (t, args, complain);

  if (args == NULL_TREE)
    return t;

  code = TREE_CODE (t);

  if (code == IDENTIFIER_NODE)
    type = IDENTIFIER_TYPE_VALUE (t);
  else
    type = TREE_TYPE (t);

  gcc_assert (type != unknown_type_node);

  /* Reuse typedefs.
We need to do this to handle dependent attributes, such as attribute aligned. */ if (TYPE_P (t) && typedef_variant_p (t)) { tree decl = TYPE_NAME (t); if (alias_template_specialization_p (t)) { /* DECL represents an alias template and we want to instantiate it. */ tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl)); tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl); r = instantiate_alias_template (tmpl, gen_args, complain); } else if (DECL_CLASS_SCOPE_P (decl) && CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (decl)) && uses_template_parms (DECL_CONTEXT (decl))) { tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl)); tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl); r = retrieve_specialization (tmpl, gen_args, 0); } else if (DECL_FUNCTION_SCOPE_P (decl) && DECL_TEMPLATE_INFO (DECL_CONTEXT (decl)) && uses_template_parms (DECL_TI_ARGS (DECL_CONTEXT (decl)))) r = retrieve_local_specialization (decl); else /* The typedef is from a non-template context. */ return t; if (r) { r = TREE_TYPE (r); r = cp_build_qualified_type_real (r, cp_type_quals (t) | cp_type_quals (r), complain | tf_ignore_bad_quals); return r; } else { /* We don't have an instantiation yet, so drop the typedef. 
*/ int quals = cp_type_quals (t); t = DECL_ORIGINAL_TYPE (decl); t = cp_build_qualified_type_real (t, quals, complain | tf_ignore_bad_quals); } } bool fndecl_type = (complain & tf_fndecl_type); complain &= ~tf_fndecl_type; if (type && code != TYPENAME_TYPE && code != TEMPLATE_TYPE_PARM && code != TEMPLATE_PARM_INDEX && code != IDENTIFIER_NODE && code != FUNCTION_TYPE && code != METHOD_TYPE) type = tsubst (type, args, complain, in_decl); if (type == error_mark_node) return error_mark_node; switch (code) { case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: return tsubst_aggr_type (t, args, complain, in_decl, /*entering_scope=*/0); case ERROR_MARK: case IDENTIFIER_NODE: case VOID_TYPE: case REAL_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case BOOLEAN_TYPE: case NULLPTR_TYPE: case LANG_TYPE: return t; case INTEGER_TYPE: if (t == integer_type_node) return t; if (TREE_CODE (TYPE_MIN_VALUE (t)) == INTEGER_CST && TREE_CODE (TYPE_MAX_VALUE (t)) == INTEGER_CST) return t; { tree max, omax = TREE_OPERAND (TYPE_MAX_VALUE (t), 0); max = tsubst_expr (omax, args, complain, in_decl, /*integral_constant_expression_p=*/false); /* Fix up type of the magic NOP_EXPR with TREE_SIDE_EFFECTS if needed. */ if (TREE_CODE (max) == NOP_EXPR && TREE_SIDE_EFFECTS (omax) && !TREE_TYPE (max)) TREE_TYPE (max) = TREE_TYPE (TREE_OPERAND (max, 0)); /* If we're in a partial instantiation, preserve the magic NOP_EXPR with TREE_SIDE_EFFECTS that indicates this is not an integral constant expression. 
*/ if (processing_template_decl && TREE_SIDE_EFFECTS (omax) && TREE_CODE (omax) == NOP_EXPR) { gcc_assert (TREE_CODE (max) == NOP_EXPR); TREE_SIDE_EFFECTS (max) = 1; } return compute_array_index_type (NULL_TREE, max, complain); } case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: { int idx; int level; int levels; tree arg = NULL_TREE; /* Early in template argument deduction substitution, we don't want to reduce the level of 'auto', or it will be confused with a normal template parm in subsequent deduction. */ if (is_auto (t) && (complain & tf_partial)) return t; r = NULL_TREE; gcc_assert (TREE_VEC_LENGTH (args) > 0); template_parm_level_and_index (t, &level, &idx); levels = TMPL_ARGS_DEPTH (args); if (level <= levels && TREE_VEC_LENGTH (TMPL_ARGS_LEVEL (args, level)) > 0) { arg = TMPL_ARG (args, level, idx); /* See through ARGUMENT_PACK_SELECT arguments. */ if (arg && TREE_CODE (arg) == ARGUMENT_PACK_SELECT) arg = argument_pack_select_arg (arg); } if (arg == error_mark_node) return error_mark_node; else if (arg != NULL_TREE) { if (ARGUMENT_PACK_P (arg)) /* If ARG is an argument pack, we don't actually want to perform a substitution here, because substitutions for argument packs are only done element-by-element. We can get to this point when substituting the type of a non-type template parameter pack, when that type actually contains template parameter packs from an outer template, e.g., template<typename... Types> struct A { template<Types... Values> struct B { }; }; */ return t; if (code == TEMPLATE_TYPE_PARM) { int quals; gcc_assert (TYPE_P (arg)); quals = cp_type_quals (arg) | cp_type_quals (t); return cp_build_qualified_type_real (arg, quals, complain | tf_ignore_bad_quals); } else if (code == BOUND_TEMPLATE_TEMPLATE_PARM) { /* We are processing a type constructed from a template template parameter. 
*/ tree argvec = tsubst (TYPE_TI_ARGS (t), args, complain, in_decl); if (argvec == error_mark_node) return error_mark_node; gcc_assert (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (arg) == TEMPLATE_DECL || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE); if (TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE) /* Consider this code: template <template <class> class Template> struct Internal { template <class Arg> using Bind = Template<Arg>; }; template <template <class> class Template, class Arg> using Instantiate = Template<Arg>; //#0 template <template <class> class Template, class Argument> using Bind = Instantiate<Internal<Template>::template Bind, Argument>; //#1 When #1 is parsed, the BOUND_TEMPLATE_TEMPLATE_PARM representing the parameter `Template' in #0 matches the UNBOUND_CLASS_TEMPLATE representing the argument `Internal<Template>::template Bind'; We then want to assemble the type `Bind<Argument>' that can't be fully created right now, because `Internal<Template>' not being complete, the Bind template cannot be looked up in that context. So we need to "store" `Bind<Argument>' for later when the context of Bind becomes complete. Let's store that in a TYPENAME_TYPE. */ return make_typename_type (TYPE_CONTEXT (arg), build_nt (TEMPLATE_ID_EXPR, TYPE_IDENTIFIER (arg), argvec), typename_type, complain); /* We can get a TEMPLATE_TEMPLATE_PARM here when we are resolving nested-types in the signature of a member function templates. Otherwise ARG is a TEMPLATE_DECL and is the real template to be instantiated. */ if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM) arg = TYPE_NAME (arg); r = lookup_template_class (arg, argvec, in_decl, DECL_CONTEXT (arg), /*entering_scope=*/0, complain); return cp_build_qualified_type_real (r, cp_type_quals (t) | cp_type_quals (r), complain); } else if (code == TEMPLATE_TEMPLATE_PARM) return arg; else /* TEMPLATE_PARM_INDEX. 
*/ return convert_from_reference (unshare_expr (arg)); } if (level == 1) /* This can happen during the attempted tsubst'ing in unify. This means that we don't yet have any information about the template parameter in question. */ return t; /* If we get here, we must have been looking at a parm for a more deeply nested template. Make a new version of this template parameter, but with a lower level. */ switch (code) { case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: if (cp_type_quals (t)) { r = tsubst (TYPE_MAIN_VARIANT (t), args, complain, in_decl); r = cp_build_qualified_type_real (r, cp_type_quals (t), complain | (code == TEMPLATE_TYPE_PARM ? tf_ignore_bad_quals : 0)); } else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM && PLACEHOLDER_TYPE_CONSTRAINTS (t) && (r = (TEMPLATE_PARM_DESCENDANTS (TEMPLATE_TYPE_PARM_INDEX (t)))) && (r = TREE_TYPE (r)) && !PLACEHOLDER_TYPE_CONSTRAINTS (r)) /* Break infinite recursion when substituting the constraints of a constrained placeholder. */; else { r = copy_type (t); TEMPLATE_TYPE_PARM_INDEX (r) = reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (t), r, levels, args, complain); TYPE_STUB_DECL (r) = TYPE_NAME (r) = TEMPLATE_TYPE_DECL (r); TYPE_MAIN_VARIANT (r) = r; TYPE_POINTER_TO (r) = NULL_TREE; TYPE_REFERENCE_TO (r) = NULL_TREE; if (TREE_CODE (t) == TEMPLATE_TYPE_PARM) { /* Propagate constraints on placeholders. */ if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t)) PLACEHOLDER_TYPE_CONSTRAINTS (r) = tsubst_constraint (constr, args, complain, in_decl); else if (tree pl = CLASS_PLACEHOLDER_TEMPLATE (t)) { pl = tsubst_copy (pl, args, complain, in_decl); CLASS_PLACEHOLDER_TEMPLATE (r) = pl; } } if (TREE_CODE (r) == TEMPLATE_TEMPLATE_PARM) /* We have reduced the level of the template template parameter, but not the levels of its template parameters, so canonical_type_parameter will not be able to find the canonical template template parameter for this level. 
Thus, we require structural equality checking to compare TEMPLATE_TEMPLATE_PARMs. */ SET_TYPE_STRUCTURAL_EQUALITY (r); else if (TYPE_STRUCTURAL_EQUALITY_P (t)) SET_TYPE_STRUCTURAL_EQUALITY (r); else TYPE_CANONICAL (r) = canonical_type_parameter (r); if (code == BOUND_TEMPLATE_TEMPLATE_PARM) { tree tinfo = TYPE_TEMPLATE_INFO (t); /* We might need to substitute into the types of non-type template parameters. */ tree tmpl = tsubst (TI_TEMPLATE (tinfo), args, complain, in_decl); if (tmpl == error_mark_node) return error_mark_node; tree argvec = tsubst (TI_ARGS (tinfo), args, complain, in_decl); if (argvec == error_mark_node) return error_mark_node; TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec); } } break; case TEMPLATE_PARM_INDEX: /* OK, now substitute the type of the non-type parameter. We couldn't do it earlier because it might be an auto parameter, and we wouldn't need to if we had an argument. */ type = tsubst (type, args, complain, in_decl); if (type == error_mark_node) return error_mark_node; r = reduce_template_parm_level (t, type, levels, args, complain); break; default: gcc_unreachable (); } return r; } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) return t; purpose = TREE_PURPOSE (t); if (purpose) { purpose = tsubst (purpose, args, complain, in_decl); if (purpose == error_mark_node) return error_mark_node; } value = TREE_VALUE (t); if (value) { value = tsubst (value, args, complain, in_decl); if (value == error_mark_node) return error_mark_node; } chain = TREE_CHAIN (t); if (chain && chain != void_type_node) { chain = tsubst (chain, args, complain, in_decl); if (chain == error_mark_node) return error_mark_node; } if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) return t; return hash_tree_cons (purpose, value, chain); } case TREE_BINFO: /* We should never be tsubsting a binfo. */ gcc_unreachable (); case TREE_VEC: /* A vector of template arguments. 
*/ gcc_assert (!type); return tsubst_template_args (t, args, complain, in_decl); case POINTER_TYPE: case REFERENCE_TYPE: { if (type == TREE_TYPE (t) && TREE_CODE (type) != METHOD_TYPE) return t; /* [temp.deduct] Type deduction may fail for any of the following reasons: -- Attempting to create a pointer to reference type. -- Attempting to create a reference to a reference type or a reference to void. Core issue 106 says that creating a reference to a reference during instantiation is no longer a cause for failure. We only enforce this check in strict C++98 mode. */ if ((TREE_CODE (type) == REFERENCE_TYPE && (((cxx_dialect == cxx98) && flag_iso) || code != REFERENCE_TYPE)) || (code == REFERENCE_TYPE && VOID_TYPE_P (type))) { static location_t last_loc; /* We keep track of the last time we issued this error message to avoid spewing a ton of messages during a single bad template instantiation. */ if (complain & tf_error && last_loc != input_location) { if (VOID_TYPE_P (type)) error ("forming reference to void"); else if (code == POINTER_TYPE) error ("forming pointer to reference type %qT", type); else error ("forming reference to reference type %qT", type); last_loc = input_location; } return error_mark_node; } else if (TREE_CODE (type) == FUNCTION_TYPE && (type_memfn_quals (type) != TYPE_UNQUALIFIED || type_memfn_rqual (type) != REF_QUAL_NONE)) { if (complain & tf_error) { if (code == POINTER_TYPE) error ("forming pointer to qualified function type %qT", type); else error ("forming reference to qualified function type %qT", type); } return error_mark_node; } else if (code == POINTER_TYPE) { r = build_pointer_type (type); if (TREE_CODE (type) == METHOD_TYPE) r = build_ptrmemfunc_type (r); } else if (TREE_CODE (type) == REFERENCE_TYPE) /* In C++0x, during template argument substitution, when there is an attempt to create a reference to a reference type, reference collapsing is applied as described in [14.3.1/4 temp.arg.type]: "If a template-argument for a 
template-parameter T names a type that is a reference to a type A, an attempt to create the type 'lvalue reference to cv T' creates the type 'lvalue reference to A,' while an attempt to create the type type rvalue reference to cv T' creates the type T" */ r = cp_build_reference_type (TREE_TYPE (type), TYPE_REF_IS_RVALUE (t) && TYPE_REF_IS_RVALUE (type)); else r = cp_build_reference_type (type, TYPE_REF_IS_RVALUE (t)); r = cp_build_qualified_type_real (r, cp_type_quals (t), complain); if (r != error_mark_node) /* Will this ever be needed for TYPE_..._TO values? */ layout_type (r); return r; } case OFFSET_TYPE: { r = tsubst (TYPE_OFFSET_BASETYPE (t), args, complain, in_decl); if (r == error_mark_node || !MAYBE_CLASS_TYPE_P (r)) { /* [temp.deduct] Type deduction may fail for any of the following reasons: -- Attempting to create "pointer to member of T" when T is not a class type. */ if (complain & tf_error) error ("creating pointer to member of non-class type %qT", r); return error_mark_node; } if (TREE_CODE (type) == REFERENCE_TYPE) { if (complain & tf_error) error ("creating pointer to member reference type %qT", type); return error_mark_node; } if (VOID_TYPE_P (type)) { if (complain & tf_error) error ("creating pointer to member of type void"); return error_mark_node; } gcc_assert (TREE_CODE (type) != METHOD_TYPE); if (TREE_CODE (type) == FUNCTION_TYPE) { /* The type of the implicit object parameter gets its cv-qualifiers from the FUNCTION_TYPE. 
*/ tree memptr; tree method_type = build_memfn_type (type, r, type_memfn_quals (type), type_memfn_rqual (type)); memptr = build_ptrmemfunc_type (build_pointer_type (method_type)); return cp_build_qualified_type_real (memptr, cp_type_quals (t), complain); } else return cp_build_qualified_type_real (build_ptrmem_type (r, type), cp_type_quals (t), complain); } case FUNCTION_TYPE: case METHOD_TYPE: { tree fntype; tree specs; fntype = tsubst_function_type (t, args, complain, in_decl); if (fntype == error_mark_node) return error_mark_node; /* Substitute the exception specification. */ specs = tsubst_exception_specification (t, args, complain, in_decl, /*defer_ok*/fndecl_type); if (specs == error_mark_node) return error_mark_node; if (specs) fntype = build_exception_variant (fntype, specs); return fntype; } case ARRAY_TYPE: { tree domain = tsubst (TYPE_DOMAIN (t), args, complain, in_decl); if (domain == error_mark_node) return error_mark_node; /* As an optimization, we avoid regenerating the array type if it will obviously be the same as T. */ if (type == TREE_TYPE (t) && domain == TYPE_DOMAIN (t)) return t; /* These checks should match the ones in create_array_type_for_decl. [temp.deduct] The deduction may fail for any of the following reasons: -- Attempting to create an array with an element type that is void, a function type, or a reference type, or [DR337] an abstract class type. 
*/ if (VOID_TYPE_P (type) || TREE_CODE (type) == FUNCTION_TYPE || (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) || TREE_CODE (type) == REFERENCE_TYPE) { if (complain & tf_error) error ("creating array of %qT", type); return error_mark_node; } if (abstract_virtuals_error_sfinae (ACU_ARRAY, type, complain)) return error_mark_node; r = build_cplus_array_type (type, domain); if (TYPE_USER_ALIGN (t)) { SET_TYPE_ALIGN (r, TYPE_ALIGN (t)); TYPE_USER_ALIGN (r) = 1; } return r; } case TYPENAME_TYPE: { tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/1); if (ctx == error_mark_node) return error_mark_node; tree f = tsubst_copy (TYPENAME_TYPE_FULLNAME (t), args, complain, in_decl); if (f == error_mark_node) return error_mark_node; if (!MAYBE_CLASS_TYPE_P (ctx)) { if (complain & tf_error) error ("%qT is not a class, struct, or union type", ctx); return error_mark_node; } else if (!uses_template_parms (ctx) && !TYPE_BEING_DEFINED (ctx)) { /* Normally, make_typename_type does not require that the CTX have complete type in order to allow things like: template <class T> struct S { typename S<T>::X Y; }; But, such constructs have already been resolved by this point, so here CTX really should have complete type, unless it's a partial instantiation. 
*/ ctx = complete_type (ctx); if (!COMPLETE_TYPE_P (ctx)) { if (complain & tf_error) cxx_incomplete_type_error (NULL_TREE, ctx); return error_mark_node; } } f = make_typename_type (ctx, f, typename_type, complain | tf_keep_type_decl); if (f == error_mark_node) return f; if (TREE_CODE (f) == TYPE_DECL) { complain |= tf_ignore_bad_quals; f = TREE_TYPE (f); } if (TREE_CODE (f) != TYPENAME_TYPE) { if (TYPENAME_IS_ENUM_P (t) && TREE_CODE (f) != ENUMERAL_TYPE) { if (complain & tf_error) error ("%qT resolves to %qT, which is not an enumeration type", t, f); else return error_mark_node; } else if (TYPENAME_IS_CLASS_P (t) && !CLASS_TYPE_P (f)) { if (complain & tf_error) error ("%qT resolves to %qT, which is is not a class type", t, f); else return error_mark_node; } } return cp_build_qualified_type_real (f, cp_type_quals (f) | cp_type_quals (t), complain); } case UNBOUND_CLASS_TEMPLATE: { tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/1); tree name = TYPE_IDENTIFIER (t); tree parm_list = DECL_TEMPLATE_PARMS (TYPE_NAME (t)); if (ctx == error_mark_node || name == error_mark_node) return error_mark_node; if (parm_list) parm_list = tsubst_template_parms (parm_list, args, complain); return make_unbound_class_template (ctx, name, parm_list, complain); } case TYPEOF_TYPE: { tree type; ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; type = tsubst_expr (TYPEOF_TYPE_EXPR (t), args, complain, in_decl, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; type = finish_typeof (type); return cp_build_qualified_type_real (type, cp_type_quals (t) | cp_type_quals (type), complain); } case DECLTYPE_TYPE: { tree type; ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; type = tsubst_copy_and_build (DECLTYPE_TYPE_EXPR (t), args, complain|tf_decltype, in_decl, /*function_p*/false, /*integral_constant_expression*/false); if (DECLTYPE_FOR_INIT_CAPTURE (t)) { if (type == NULL_TREE) { if 
(complain & tf_error) error ("empty initializer in lambda init-capture"); type = error_mark_node; } else if (TREE_CODE (type) == TREE_LIST) type = build_x_compound_expr_from_list (type, ELK_INIT, complain); } --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; if (DECLTYPE_FOR_LAMBDA_CAPTURE (t)) type = lambda_capture_field_type (type, DECLTYPE_FOR_INIT_CAPTURE (t), DECLTYPE_FOR_REF_CAPTURE (t)); else if (DECLTYPE_FOR_LAMBDA_PROXY (t)) type = lambda_proxy_type (type); else { bool id = DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (t); if (id && TREE_CODE (DECLTYPE_TYPE_EXPR (t)) == BIT_NOT_EXPR && EXPR_P (type)) /* In a template ~id could be either a complement expression or an unqualified-id naming a destructor; if instantiating it produces an expression, it's not an id-expression or member access. */ id = false; type = finish_decltype_type (type, id, complain); } return cp_build_qualified_type_real (type, cp_type_quals (t) | cp_type_quals (type), complain | tf_ignore_bad_quals); } case UNDERLYING_TYPE: { tree type = tsubst (UNDERLYING_TYPE_TYPE (t), args, complain, in_decl); return finish_underlying_type (type); } case TYPE_ARGUMENT_PACK: case NONTYPE_ARGUMENT_PACK: { tree r; if (code == NONTYPE_ARGUMENT_PACK) r = make_node (code); else r = cxx_make_type (code); tree pack_args = ARGUMENT_PACK_ARGS (t); pack_args = tsubst_template_args (pack_args, args, complain, in_decl); SET_ARGUMENT_PACK_ARGS (r, pack_args); return r; } case VOID_CST: case INTEGER_CST: case REAL_CST: case STRING_CST: case PLUS_EXPR: case MINUS_EXPR: case NEGATE_EXPR: case NOP_EXPR: case INDIRECT_REF: case ADDR_EXPR: case CALL_EXPR: case ARRAY_REF: case SCOPE_REF: /* We should use one of the expression tsubsts for these codes. */ gcc_unreachable (); default: sorry ("use of %qs in template", get_tree_code_name (code)); return error_mark_node; } } /* tsubst a BASELINK. OBJECT_TYPE, if non-NULL, is the type of the expression on the left-hand side of the "." or "->" operator. 
We only do the lookup if we had a dependent BASELINK.  Otherwise we
   adjust it onto the instantiated hierarchy.  */

static tree
tsubst_baselink (tree baselink, tree object_type,
                 tree args, tsubst_flags_t complain, tree in_decl)
{
  /* Whether the original reference was explicitly qualified (A::f).  */
  bool qualified_p = BASELINK_QUALIFIED_P (baselink);
  tree qualifying_scope = BINFO_TYPE (BASELINK_ACCESS_BINFO (baselink));
  qualifying_scope = tsubst (qualifying_scope, args, complain, in_decl);

  tree optype = BASELINK_OPTYPE (baselink);
  optype = tsubst (optype, args, complain, in_decl);

  /* If the BASELINK names a template-id, peel off the template arguments
     and substitute into them; they are re-attached below.  */
  tree template_args = NULL_TREE;
  bool template_id_p = false;
  tree fns = BASELINK_FUNCTIONS (baselink);
  if (TREE_CODE (fns) == TEMPLATE_ID_EXPR)
    {
      template_id_p = true;
      template_args = TREE_OPERAND (fns, 1);
      fns = TREE_OPERAND (fns, 0);
      if (template_args)
        template_args = tsubst_template_args (template_args, args,
                                              complain, in_decl);
    }

  /* The lookup was dependent iff substitution changed the binfo type;
     in that case the name must be looked up again in the instantiated
     scope.  */
  tree binfo_type = BINFO_TYPE (BASELINK_BINFO (baselink));
  binfo_type = tsubst (binfo_type, args, complain, in_decl);
  bool dependent_p = binfo_type != BINFO_TYPE (BASELINK_BINFO (baselink));

  if (dependent_p)
    {
      tree name = OVL_NAME (fns);
      if (IDENTIFIER_CONV_OP_P (name))
        name = make_conv_op_name (optype);

      if (name == complete_dtor_identifier)
        /* Treat as-if non-dependent below.  */
        dependent_p = false;

      baselink = lookup_fnfields (qualifying_scope, name, /*protect=*/1);
      if (!baselink)
        {
          if ((complain & tf_error)
              && constructor_name_p (name, qualifying_scope))
            error ("cannot call constructor %<%T::%D%> directly",
                   qualifying_scope, name);
          return error_mark_node;
        }

      if (BASELINK_P (baselink))
        fns = BASELINK_FUNCTIONS (baselink);
    }
  else
    /* We're going to overwrite pieces below, make a duplicate.  */
    baselink = copy_node (baselink);

  /* If lookup found a single function, mark it as used at this point.
     (If lookup found multiple functions the one selected later by
     overload resolution will be marked as used at that point.)  */
  if (!template_id_p && !really_overloaded_fn (fns))
    {
      tree fn = OVL_FIRST (fns);
      bool ok = mark_used (fn, complain);
      if (!ok && !(complain & tf_error))
        return error_mark_node;
      if (ok && BASELINK_P (baselink))
        /* We might have instantiated an auto function.  */
        TREE_TYPE (baselink) = TREE_TYPE (fn);
    }

  if (BASELINK_P (baselink))
    {
      /* Add back the template arguments, if present.  */
      if (template_id_p)
        BASELINK_FUNCTIONS (baselink)
          = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, template_args);

      /* Update the conversion operator type.  */
      BASELINK_OPTYPE (baselink) = optype;
    }

  if (!object_type)
    object_type = current_class_type;

  if (qualified_p || !dependent_p)
    {
      baselink = adjust_result_of_qualified_name_lookup (baselink,
                                                         qualifying_scope,
                                                         object_type);
      if (!qualified_p)
        /* We need to call adjust_result_of_qualified_name_lookup in case the
           destructor names a base class, but we unset BASELINK_QUALIFIED_P
           so that we still get virtual function binding.  */
        BASELINK_QUALIFIED_P (baselink) = false;
    }

  return baselink;
}

/* Like tsubst_expr for a SCOPE_REF, given by QUALIFIED_ID.  DONE is
   true if the qualified-id will be a postfix-expression in-and-of
   itself; false if more of the postfix-expression follows the
   QUALIFIED_ID.  ADDRESS_P is true if the qualified-id is the operand
   of "&".  */

static tree
tsubst_qualified_id (tree qualified_id, tree args,
                     tsubst_flags_t complain, tree in_decl,
                     bool done, bool address_p)
{
  tree expr;
  tree scope;
  tree name;
  bool is_template;
  tree template_args;
  location_t loc = UNKNOWN_LOCATION;

  gcc_assert (TREE_CODE (qualified_id) == SCOPE_REF);

  /* Figure out what name to look up.  A template-id contributes its
     own template arguments, which are substituted here.  */
  name = TREE_OPERAND (qualified_id, 1);
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    {
      is_template = true;
      loc = EXPR_LOCATION (name);
      template_args = TREE_OPERAND (name, 1);
      if (template_args)
        template_args = tsubst_template_args (template_args, args,
                                              complain, in_decl);
      if (template_args == error_mark_node)
        return error_mark_node;
      name = TREE_OPERAND (name, 0);
    }
  else
    {
      is_template = false;
      template_args = NULL_TREE;
    }

  /* Substitute into the qualifying scope.  When there are no ARGS, we
     are just trying to simplify a non-dependent expression.  In that
     case the qualifying scope may be dependent, and, in any case,
     substituting will not help.  */
  scope = TREE_OPERAND (qualified_id, 0);
  if (args)
    {
      scope = tsubst (scope, args, complain, in_decl);
      expr = tsubst_copy (name, args, complain, in_decl);
    }
  else
    expr = name;

  if (dependent_scope_p (scope))
    {
      /* The scope is still dependent: rebuild the qualified name
         unresolved so a later substitution can look it up.  */
      if (is_template)
        expr = build_min_nt_loc (loc, TEMPLATE_ID_EXPR, expr, template_args);
      tree r = build_qualified_name (NULL_TREE, scope, expr,
                                     QUALIFIED_NAME_IS_TEMPLATE (qualified_id));
      REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (qualified_id);
      return r;
    }

  if (!BASELINK_P (name) && !DECL_P (expr))
    {
      if (TREE_CODE (expr) == BIT_NOT_EXPR)
        {
          /* A BIT_NOT_EXPR is used to represent a destructor.  */
          if (!check_dtor_name (scope, TREE_OPERAND (expr, 0)))
            {
              error ("qualifying type %qT does not match destructor name ~%qT",
                     scope, TREE_OPERAND (expr, 0));
              expr = error_mark_node;
            }
          else
            expr = lookup_qualified_name (scope, complete_dtor_identifier,
                                          /*is_type_p=*/0, false);
        }
      else
        expr = lookup_qualified_name (scope, expr, /*is_type_p=*/0, false);
      /* For a TEMPLATE_DECL, classify its result, not the wrapper.  */
      if (TREE_CODE (TREE_CODE (expr) == TEMPLATE_DECL
                     ? DECL_TEMPLATE_RESULT (expr) : expr) == TYPE_DECL)
        {
          if (complain & tf_error)
            {
              error ("dependent-name %qE is parsed as a non-type, but "
                     "instantiation yields a type", qualified_id);
              inform (input_location,
                      "say %<typename %E%> if a type is meant", qualified_id);
            }
          return error_mark_node;
        }
    }

  if (DECL_P (expr))
    {
      check_accessibility_of_qualified_id (expr, /*object_type=*/NULL_TREE,
                                           scope);
      /* Remember that there was a reference to this entity.  */
      if (!mark_used (expr, complain) && !(complain & tf_error))
        return error_mark_node;
    }

  if (expr == error_mark_node || TREE_CODE (expr) == TREE_LIST)
    {
      if (complain & tf_error)
        qualified_name_lookup_error (scope,
                                     TREE_OPERAND (qualified_id, 1),
                                     expr, input_location);
      return error_mark_node;
    }

  if (is_template)
    {
      /* We may be repeating a check already done during parsing, but
         if it was well-formed and passed then, it will pass again
         now, and if it didn't, we wouldn't have got here.  The case
         we want to catch is when we couldn't tell then, and can now,
         namely when templ prior to substitution was an
         identifier.  */
      if (flag_concepts && check_auto_in_tmpl_args (expr, template_args))
        return error_mark_node;

      if (variable_template_p (expr))
        expr = lookup_and_finish_template_variable (expr, template_args,
                                                    complain);
      else
        expr = lookup_template_function (expr, template_args);
    }

  if (expr == error_mark_node && complain & tf_error)
    qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1),
                                 expr, input_location);
  else if (TYPE_P (scope))
    {
      expr = (adjust_result_of_qualified_name_lookup
              (expr, scope, current_nonlambda_class_type ()));
      expr = (finish_qualified_id_expr
              (scope, expr, done, address_p && PTRMEM_OK_P (qualified_id),
               QUALIFIED_NAME_IS_TEMPLATE (qualified_id),
               /*template_arg_p=*/false, complain));
    }

  /* Expressions do not generally have reference type.  */
  if (TREE_CODE (expr) != SCOPE_REF
      /* However, if we're about to form a pointer-to-member, we just
         want the referenced member referenced.  */
      && TREE_CODE (expr) != OFFSET_REF)
    expr = convert_from_reference (expr);

  if (REF_PARENTHESIZED_P (qualified_id))
    expr = force_paren_expr (expr);

  return expr;
}

/* tsubst the initializer for a VAR_DECL.  INIT is the unsubstituted
   initializer, DECL is the substituted VAR_DECL.  Other arguments are as
   for tsubst.  Returns NULL_TREE when INIT is NULL_TREE.  */

static tree
tsubst_init (tree init, tree decl, tree args,
             tsubst_flags_t complain, tree in_decl)
{
  if (!init)
    return NULL_TREE;

  init = tsubst_expr (init, args, complain, in_decl, false);

  if (!init && TREE_TYPE (decl) != error_mark_node)
    {
      /* If we had an initializer but it instantiated to nothing,
         value-initialize the object.  This will only occur when the
         initializer was a pack expansion where the parameter packs
         used in that expansion were of length zero.  */
      init = build_value_init (TREE_TYPE (decl), complain);
      if (TREE_CODE (init) == AGGR_INIT_EXPR)
        init = get_target_expr_sfinae (init, complain);
      if (TREE_CODE (init) == TARGET_EXPR)
        TARGET_EXPR_DIRECT_INIT_P (init) = true;
    }

  return init;
}

/* Like tsubst, but deals with expressions.  This function just replaces
   template parms; to finish processing the resultant expression, use
   tsubst_copy_and_build or tsubst_expr.  */

static tree
tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  enum tree_code code;
  tree r;

  if (t == NULL_TREE || t == error_mark_node || args == NULL_TREE)
    return t;

  code = TREE_CODE (t);

  switch (code)
    {
    case PARM_DECL:
      r = retrieve_local_specialization (t);

      if (r == NULL_TREE)
        {
          /* We get here for a use of 'this' in an NSDMI.  */
          if (DECL_NAME (t) == this_identifier && current_class_ptr)
            return current_class_ptr;

          /* This can happen for a parameter name used later in a function
             declaration (such as in a late-specified return type).
             Just make a dummy decl, since it's only used for its type.
*/ gcc_assert (cp_unevaluated_operand != 0); r = tsubst_decl (t, args, complain); /* Give it the template pattern as its context; its true context hasn't been instantiated yet and this is good enough for mangling. */ DECL_CONTEXT (r) = DECL_CONTEXT (t); } if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) r = argument_pack_select_arg (r); if (!mark_used (r, complain) && !(complain & tf_error)) return error_mark_node; return r; case CONST_DECL: { tree enum_type; tree v; if (DECL_TEMPLATE_PARM_P (t)) return tsubst_copy (DECL_INITIAL (t), args, complain, in_decl); /* There is no need to substitute into namespace-scope enumerators. */ if (DECL_NAMESPACE_SCOPE_P (t)) return t; /* If ARGS is NULL, then T is known to be non-dependent. */ if (args == NULL_TREE) return scalar_constant_value (t); /* Unfortunately, we cannot just call lookup_name here. Consider: template <int I> int f() { enum E { a = I }; struct S { void g() { E e = a; } }; }; When we instantiate f<7>::S::g(), say, lookup_name is not clever enough to find f<7>::a. */ enum_type = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/0); for (v = TYPE_VALUES (enum_type); v != NULL_TREE; v = TREE_CHAIN (v)) if (TREE_PURPOSE (v) == DECL_NAME (t)) return TREE_VALUE (v); /* We didn't find the name. That should never happen; if name-lookup found it during preliminary parsing, we should find it again here during instantiation. 
*/ gcc_unreachable (); } return t; case FIELD_DECL: if (DECL_CONTEXT (t)) { tree ctx; ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/1); if (ctx != DECL_CONTEXT (t)) { tree r = lookup_field (ctx, DECL_NAME (t), 0, false); if (!r) { if (complain & tf_error) error ("using invalid field %qD", t); return error_mark_node; } return r; } } return t; case VAR_DECL: case FUNCTION_DECL: if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t)) r = tsubst (t, args, complain, in_decl); else if (local_variable_p (t) && uses_template_parms (DECL_CONTEXT (t))) { r = retrieve_local_specialization (t); if (r == NULL_TREE) { /* First try name lookup to find the instantiation. */ r = lookup_name (DECL_NAME (t)); if (r && !is_capture_proxy (r)) { /* Make sure that the one we found is the one we want. */ tree ctx = enclosing_instantiation_of (DECL_CONTEXT (t)); if (ctx != DECL_CONTEXT (r)) r = NULL_TREE; } if (r) /* OK */; else { /* This can happen for a variable used in a late-specified return type of a local lambda, or for a local static or constant. Building a new VAR_DECL should be OK in all those cases. */ r = tsubst_decl (t, args, complain); if (local_specializations) /* Avoid infinite recursion (79640). */ register_local_specialization (r, t); if (decl_maybe_constant_var_p (r)) { /* We can't call cp_finish_decl, so handle the initializer by hand. */ tree init = tsubst_init (DECL_INITIAL (t), r, args, complain, in_decl); if (!processing_template_decl) init = maybe_constant_init (init); if (processing_template_decl ? 
potential_constant_expression (init) : reduced_constant_expression_p (init)) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = TREE_CONSTANT (r) = true; DECL_INITIAL (r) = init; if (tree auto_node = type_uses_auto (TREE_TYPE (r))) TREE_TYPE (r) = do_auto_deduction (TREE_TYPE (r), init, auto_node, complain, adc_variable_type); } gcc_assert (cp_unevaluated_operand || TREE_STATIC (r) || decl_constant_var_p (r) || errorcount || sorrycount); if (!processing_template_decl && !TREE_STATIC (r)) r = process_outer_var_ref (r, complain); } /* Remember this for subsequent uses. */ if (local_specializations) register_local_specialization (r, t); } if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) r = argument_pack_select_arg (r); } else r = t; if (!mark_used (r, complain)) return error_mark_node; return r; case NAMESPACE_DECL: return t; case OVERLOAD: /* An OVERLOAD will always be a non-dependent overload set; an overload set from function scope will just be represented with an IDENTIFIER_NODE, and from class scope with a BASELINK. */ gcc_assert (!uses_template_parms (t)); /* We must have marked any lookups as persistent. */ gcc_assert (!OVL_LOOKUP_P (t) || OVL_USED_P (t)); return t; case BASELINK: return tsubst_baselink (t, current_nonlambda_class_type (), args, complain, in_decl); case TEMPLATE_DECL: if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) return tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)), args, complain, in_decl); else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t)) return tsubst (t, args, complain, in_decl); else if (DECL_CLASS_SCOPE_P (t) && uses_template_parms (DECL_CONTEXT (t))) { /* Template template argument like the following example need special treatment: template <template <class> class TT> struct C {}; template <class T> struct D { template <class U> struct E {}; C<E> c; // #1 }; D<int> d; // #2 We are processing the template argument `E' in #1 for the template instantiation #2. Originally, `E' is a TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT. 
Now we have to substitute this with one having context `D<int>'. */ tree context = tsubst (DECL_CONTEXT (t), args, complain, in_decl); if (dependent_scope_p (context)) { /* When rewriting a constructor into a deduction guide, a non-dependent name can become dependent, so memtmpl<args> becomes context::template memtmpl<args>. */ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build_qualified_name (type, context, DECL_NAME (t), /*template*/true); } return lookup_field (context, DECL_NAME(t), 0, false); } else /* Ordinary template template argument. */ return t; case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: { /* Handle location wrappers by substituting the wrapped node first, *then* reusing the resulting type. Doing the type first ensures that we handle template parameters and parameter pack expansions. */ gcc_assert (location_wrapper_p (t)); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); return maybe_wrap_with_location (op0, EXPR_LOCATION (t)); } case CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case STATIC_CAST_EXPR: case DYNAMIC_CAST_EXPR: case IMPLICIT_CONV_EXPR: case CONVERT_EXPR: case NOP_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); return build1 (code, type, op0); } case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) { tree expanded, op = TREE_OPERAND (t, 0); int len = 0; if (SIZEOF_EXPR_TYPE_P (t)) op = TREE_TYPE (op); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; /* We only want to compute the number of arguments. 
*/ if (PACK_EXPANSION_P (op)) expanded = tsubst_pack_expansion (op, args, complain, in_decl); else expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op), args, complain, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; if (TREE_CODE (expanded) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded); /* Set TREE_USED for the benefit of -Wunused. */ for (int i = 0; i < len; i++) if (DECL_P (TREE_VEC_ELT (expanded, i))) TREE_USED (TREE_VEC_ELT (expanded, i)) = true; } if (expanded == error_mark_node) return error_mark_node; else if (PACK_EXPANSION_P (expanded) || (TREE_CODE (expanded) == TREE_VEC && pack_expansion_args_count (expanded))) { if (PACK_EXPANSION_P (expanded)) /* OK. */; else if (TREE_VEC_LENGTH (expanded) == 1) expanded = TREE_VEC_ELT (expanded, 0); else expanded = make_argument_pack (expanded); if (TYPE_P (expanded)) return cxx_sizeof_or_alignof_type (expanded, SIZEOF_EXPR, false, complain & tf_error); else return cxx_sizeof_or_alignof_expr (expanded, SIZEOF_EXPR, complain & tf_error); } else return build_int_cst (size_type_node, len); } if (SIZEOF_EXPR_TYPE_P (t)) { r = tsubst (TREE_TYPE (TREE_OPERAND (t, 0)), args, complain, in_decl); r = build1 (NOP_EXPR, r, error_mark_node); r = build1 (SIZEOF_EXPR, tsubst (TREE_TYPE (t), args, complain, in_decl), r); SIZEOF_EXPR_TYPE_P (r) = 1; return r; } /* Fall through */ case INDIRECT_REF: case NEGATE_EXPR: case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case ADDR_EXPR: case UNARY_PLUS_EXPR: /* Unary + */ case ALIGNOF_EXPR: case AT_ENCODE_EXPR: case ARROW_EXPR: case THROW_EXPR: case TYPEID_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case PAREN_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); r = build1 (code, type, op0); if (code == ALIGNOF_EXPR) ALIGNOF_EXPR_STD_P (r) = ALIGNOF_EXPR_STD_P (t); return r; } case COMPONENT_REF: { tree object; tree name; object = tsubst_copy (TREE_OPERAND (t, 0), args, complain, 
in_decl); name = TREE_OPERAND (t, 1); if (TREE_CODE (name) == BIT_NOT_EXPR) { name = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = build1 (BIT_NOT_EXPR, NULL_TREE, name); } else if (TREE_CODE (name) == SCOPE_REF && TREE_CODE (TREE_OPERAND (name, 1)) == BIT_NOT_EXPR) { tree base = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = TREE_OPERAND (name, 1); name = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = build1 (BIT_NOT_EXPR, NULL_TREE, name); name = build_qualified_name (/*type=*/NULL_TREE, base, name, /*template_p=*/false); } else if (BASELINK_P (name)) name = tsubst_baselink (name, non_reference (TREE_TYPE (object)), args, complain, in_decl); else name = tsubst_copy (name, args, complain, in_decl); return build_nt (COMPONENT_REF, object, name, NULL_TREE); } case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case RSHIFT_EXPR: case LSHIFT_EXPR: case RROTATE_EXPR: case LROTATE_EXPR: case EQ_EXPR: case NE_EXPR: case MAX_EXPR: case MIN_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case COMPOUND_EXPR: case DOTSTAR_EXPR: case MEMBER_REF: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_nt (code, op0, op1); } case SCOPE_REF: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_qualified_name (/*type=*/NULL_TREE, op0, op1, QUALIFIED_NAME_IS_TEMPLATE (t)); } case ARRAY_REF: { tree op0 = tsubst_copy 
(TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_nt (ARRAY_REF, op0, op1, NULL_TREE, NULL_TREE); } case CALL_EXPR: { int n = VL_EXP_OPERAND_LENGTH (t); tree result = build_vl_exp (CALL_EXPR, n); int i; for (i = 0; i < n; i++) TREE_OPERAND (t, i) = tsubst_copy (TREE_OPERAND (t, i), args, complain, in_decl); return result; } case COND_EXPR: case MODOP_EXPR: case PSEUDO_DTOR_EXPR: case VEC_PERM_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); r = build_nt (code, op0, op1, op2); TREE_NO_WARNING (r) = TREE_NO_WARNING (t); return r; } case NEW_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); r = build_nt (code, op0, op1, op2); NEW_EXPR_USE_GLOBAL (r) = NEW_EXPR_USE_GLOBAL (t); return r; } case DELETE_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); r = build_nt (code, op0, op1); DELETE_EXPR_USE_GLOBAL (r) = DELETE_EXPR_USE_GLOBAL (t); DELETE_EXPR_USE_VEC (r) = DELETE_EXPR_USE_VEC (t); return r; } case TEMPLATE_ID_EXPR: { /* Substituted template arguments */ tree fn = TREE_OPERAND (t, 0); tree targs = TREE_OPERAND (t, 1); fn = tsubst_copy (fn, args, complain, in_decl); if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); return lookup_template_function (fn, targs); } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) return t; purpose = TREE_PURPOSE (t); if (purpose) purpose = tsubst_copy (purpose, args, complain, in_decl); value = TREE_VALUE (t); if (value) value = tsubst_copy (value, args, complain, in_decl); 
chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = tsubst_copy (chain, args, complain, in_decl); if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) return t; return tree_cons (purpose, value, chain); } case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: case INTEGER_TYPE: case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: case POINTER_TYPE: case REFERENCE_TYPE: case OFFSET_TYPE: case FUNCTION_TYPE: case METHOD_TYPE: case ARRAY_TYPE: case TYPENAME_TYPE: case UNBOUND_CLASS_TEMPLATE: case TYPEOF_TYPE: case DECLTYPE_TYPE: case TYPE_DECL: return tsubst (t, args, complain, in_decl); case USING_DECL: t = DECL_NAME (t); /* Fall through. */ case IDENTIFIER_NODE: if (IDENTIFIER_CONV_OP_P (t)) { tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); return make_conv_op_name (new_type); } else return t; case CONSTRUCTOR: /* This is handled by tsubst_copy_and_build. */ gcc_unreachable (); case VA_ARG_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build_x_va_arg (EXPR_LOCATION (t), op0, type); } case CLEANUP_POINT_EXPR: /* We shouldn't have built any of these during initial template generation. Instead, they should be built during instantiation in response to the saved STMT_IS_FULL_EXPR_P setting. 
*/ gcc_unreachable (); case OFFSET_REF: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); r = build2 (code, type, op0, op1); PTRMEM_OK_P (r) = PTRMEM_OK_P (t); if (!mark_used (TREE_OPERAND (r, 1), complain) && !(complain & tf_error)) return error_mark_node; return r; } case EXPR_PACK_EXPANSION: error ("invalid use of pack expansion expression"); return error_mark_node; case NONTYPE_ARGUMENT_PACK: error ("use %<...%> to expand argument pack"); return error_mark_node; case VOID_CST: gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t))); return t; case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: { /* Instantiate any typedefs in the type. */ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); r = fold_convert (type, t); gcc_assert (TREE_CODE (r) == code); return r; } case PTRMEM_CST: /* These can sometimes show up in a partial instantiation, but never involve template parms. */ gcc_assert (!uses_template_parms (t)); return t; case UNARY_LEFT_FOLD_EXPR: return tsubst_unary_left_fold (t, args, complain, in_decl); case UNARY_RIGHT_FOLD_EXPR: return tsubst_unary_right_fold (t, args, complain, in_decl); case BINARY_LEFT_FOLD_EXPR: return tsubst_binary_left_fold (t, args, complain, in_decl); case BINARY_RIGHT_FOLD_EXPR: return tsubst_binary_right_fold (t, args, complain, in_decl); case PREDICT_EXPR: return t; case DEBUG_BEGIN_STMT: /* ??? There's no point in copying it for now, but maybe some day it will contain more information, such as a pointer back to the containing function, inlined copy or so. */ return t; default: /* We shouldn't get here, but keep going if !flag_checking. */ if (flag_checking) gcc_unreachable (); return t; } } /* Helper function for tsubst_omp_clauses, used for instantiation of OMP_CLAUSE_DECL of clauses. 
*/

static tree
tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain,
			tree in_decl)
{
  /* Nothing to substitute into.  */
  if (decl == NULL_TREE)
    return NULL_TREE;

  /* Handle an OpenMP array section represented as a TREE_LIST (or
     OMP_CLAUSE_DEPEND_KIND).  An OMP_CLAUSE_DEPEND (with a depend
     kind of OMP_CLAUSE_DEPEND_SINK) can also be represented as a
     TREE_LIST.  We can handle it exactly the same as an array section
     (purpose, value, and a chain), even though the nomenclature
     (low_bound, length, etc) is different.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      /* Substitute the low bound (TREE_PURPOSE) and length (TREE_VALUE)
	 of this section node.  */
      tree low_bound
	= tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);
      tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl,
				 /*integral_constant_expression_p=*/false);
      /* Recurse over the rest of the section list.  */
      tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain,
					   in_decl);
      /* If substitution changed nothing, share the original node rather
	 than allocating a new one.  */
      if (TREE_PURPOSE (decl) == low_bound
	  && TREE_VALUE (decl) == length
	  && TREE_CHAIN (decl) == chain)
	return decl;
      tree ret = tree_cons (low_bound, length, chain);
      /* Carry over the depend-sink negation flag to the new node.  */
      OMP_CLAUSE_DEPEND_SINK_NEGATIVE (ret)
	= OMP_CLAUSE_DEPEND_SINK_NEGATIVE (decl);
      return ret;
    }
  /* Ordinary decl/expression operand: substitute it as an expression.  */
  tree ret = tsubst_expr (decl, args, complain, in_decl,
			  /*integral_constant_expression_p=*/false);
  /* Undo convert_from_reference tsubst_expr could have called.  */
  if (decl && REFERENCE_REF_P (ret) && !REFERENCE_REF_P (decl))
    ret = TREE_OPERAND (ret, 0);
  return ret;
}

/* Like tsubst_copy, but specifically for OpenMP clauses.
*/

static tree
tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort,
		    tree args, tsubst_flags_t complain, tree in_decl)
{
  /* New clause list is built front-to-back in reverse and nreverse'd at
     the end.  */
  tree new_clauses = NULL_TREE, nc, oc;
  /* Remembers a linear clause copied without a step, so the step can be
     cleared again after finish_omp_clauses (which may have filled one
     in).  */
  tree linear_no_step = NULL_TREE;

  for (oc = clauses; oc ; oc = OMP_CLAUSE_CHAIN (oc))
    {
      /* Copy the clause node and prepend it to the result list.  */
      nc = copy_node (oc);
      OMP_CLAUSE_CHAIN (nc) = new_clauses;
      new_clauses = nc;

      /* Substitute into the clause's operands according to its code.  */
      switch (OMP_CLAUSE_CODE (nc))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  if (OMP_CLAUSE_LASTPRIVATE_STMT (oc))
	    {
	      /* Substitute the lastprivate statement into a fresh
		 statement list.  */
	      OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list ();
	      tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	      OMP_CLAUSE_LASTPRIVATE_STMT (nc)
		= pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc));
	    }
	  /* FALLTHRU */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_UNIFORM:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_USE_DEVICE_PTR:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	  /* Clauses whose only operand is a decl/array section.  */
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  break;
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_HINT:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	  /* Clauses whose only operand is an expression.  */
	  OMP_CLAUSE_OPERAND (nc, 0)
	    = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_REDUCTION:
	  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc))
	    {
	      tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc);
	      if (TREE_CODE (placeholder) == SCOPE_REF)
		{
		  /* Substitute only the scope of a qualified placeholder;
		     the unqualified name is kept as-is.  */
		  tree scope = tsubst (TREE_OPERAND (placeholder, 0), args,
				       complain, in_decl);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (nc)
		    = build_qualified_name (NULL_TREE, scope,
					    TREE_OPERAND (placeholder, 1),
					    false);
		}
	      else
		gcc_assert (identifier_p (placeholder));
	    }
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  break;
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_ALIGNED:
	  /* These carry both a decl and a second expression operand.  */
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  OMP_CLAUSE_OPERAND (nc, 1)
	    = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_LINEAR:
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  if (OMP_CLAUSE_LINEAR_STEP (oc) == NULL_TREE)
	    {
	      /* Remember this clause so the step finish_omp_clauses may
		 install can be cleared again below.  */
	      gcc_assert (!linear_no_step);
	      linear_no_step = nc;
	    }
	  else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (oc))
	    OMP_CLAUSE_LINEAR_STEP (nc)
	      = tsubst_omp_clause_decl (OMP_CLAUSE_LINEAR_STEP (oc), args,
					complain, in_decl);
	  else
	    OMP_CLAUSE_LINEAR_STEP (nc)
	      = tsubst_expr (OMP_CLAUSE_LINEAR_STEP (oc), args, complain,
			     in_decl,
			     /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_INBRANCH:
	case OMP_CLAUSE_NOTINBRANCH:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_FOR:
	case OMP_CLAUSE_PARALLEL:
	case OMP_CLAUSE_SECTIONS:
	case OMP_CLAUSE_TASKGROUP:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_THREADS:
	case OMP_CLAUSE_SIMD:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	  /* Clauses with no operands to substitute into.  */
	  break;
	default:
	  gcc_unreachable ();
	}
      if ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP)
	switch (OMP_CLAUSE_CODE (nc))
	  {
	  case OMP_CLAUSE_SHARED:
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	  case OMP_CLAUSE_LASTPRIVATE:
	  case OMP_CLAUSE_COPYPRIVATE:
	  case OMP_CLAUSE_LINEAR:
	  case OMP_CLAUSE_REDUCTION:
	  case OMP_CLAUSE_USE_DEVICE_PTR:
	  case OMP_CLAUSE_IS_DEVICE_PTR:
	    /* tsubst_expr on SCOPE_REF results in returning
	       finish_non_static_data_member result.  Undo that here.  */
	    if (TREE_CODE (OMP_CLAUSE_DECL (oc)) == SCOPE_REF
		&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (oc), 1))
		    == IDENTIFIER_NODE))
	      {
		tree t = OMP_CLAUSE_DECL (nc);
		tree v = t;
		/* Walk down through wrappers looking for the implicit
		   `this' PARM_DECL at the base.  */
		while (v)
		  switch (TREE_CODE (v))
		    {
		    case COMPONENT_REF:
		    case MEM_REF:
		    case INDIRECT_REF:
		    CASE_CONVERT:
		    case POINTER_PLUS_EXPR:
		      v = TREE_OPERAND (v, 0);
		      continue;
		    case PARM_DECL:
		      if (DECL_CONTEXT (v) == current_function_decl
			  && DECL_ARTIFICIAL (v)
			  && DECL_NAME (v) == this_identifier)
			OMP_CLAUSE_DECL (nc) = TREE_OPERAND (t, 1);
		      /* FALLTHRU */
		    default:
		      v = NULL_TREE;
		      break;
		    }
	      }
	    else if (VAR_P (OMP_CLAUSE_DECL (oc))
		     && DECL_HAS_VALUE_EXPR_P (OMP_CLAUSE_DECL (oc))
		     && DECL_ARTIFICIAL (OMP_CLAUSE_DECL (oc))
		     && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (oc))
		     && DECL_OMP_PRIVATIZED_MEMBER (OMP_CLAUSE_DECL (oc)))
	      {
		/* Propagate the privatized-member marking to the
		   substituted decl.  */
		tree decl = OMP_CLAUSE_DECL (nc);
		if (VAR_P (decl))
		  {
		    retrofit_lang_decl (decl);
		    DECL_OMP_PRIVATIZED_MEMBER (decl) = 1;
		  }
	      }
	    break;
	  default:
	    break;
	  }
    }

  /* The list was built in reverse; restore original clause order.  */
  new_clauses = nreverse (new_clauses);
  if (ort != C_ORT_OMP_DECLARE_SIMD)
    {
      new_clauses = finish_omp_clauses (new_clauses, ort);
      /* Clear any step finish_omp_clauses installed on the linear clause
	 that originally had none.  */
      if (linear_no_step)
	for (nc = new_clauses; nc; nc = OMP_CLAUSE_CHAIN (nc))
	  if (nc == linear_no_step)
	    {
	      OMP_CLAUSE_LINEAR_STEP (nc) = NULL_TREE;
	      break;
	    }
    }
  return new_clauses;
}

/* Like tsubst_copy_and_build, but unshare TREE_LIST nodes.
*/

static tree
tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
#define RECUR(t) tsubst_copy_asm_operands (t, args, complain, in_decl)

  tree purpose, value, chain;

  if (t == NULL)
    return t;

  /* Non-list operands are substituted as ordinary expressions.  */
  if (TREE_CODE (t) != TREE_LIST)
    return tsubst_copy_and_build (t, args, complain, in_decl,
				  /*function_p=*/false,
				  /*integral_constant_expression_p=*/false);

  if (t == void_list_node)
    return t;

  purpose = TREE_PURPOSE (t);
  if (purpose)
    purpose = RECUR (purpose);
  value = TREE_VALUE (t);
  if (value)
    {
      if (TREE_CODE (value) != LABEL_DECL)
	value = RECUR (value);
      else
	{
	  /* asm goto labels: look the label up by name in the current
	     function and mark it used.  */
	  value = lookup_label (DECL_NAME (value));
	  gcc_assert (TREE_CODE (value) == LABEL_DECL);
	  TREE_USED (value) = 1;
	}
    }
  chain = TREE_CHAIN (t);
  if (chain && chain != void_type_node)
    chain = RECUR (chain);
  /* Always build a fresh TREE_LIST node (unshared).  */
  return tree_cons (purpose, value, chain);
#undef RECUR
}

/* Used to temporarily communicate the list of #pragma omp parallel
   clauses to #pragma omp for instantiation if they are combined
   together.  */

static tree *omp_parallel_combined_clauses;

/* Substitute one OMP_FOR iterator.  */

static void
tsubst_omp_for_iterator (tree t, int i, tree declv, tree orig_declv,
			 tree initv, tree condv, tree incrv, tree *clauses,
			 tree args, tsubst_flags_t complain, tree in_decl,
			 bool integral_constant_expression_p)
{
#define RECUR(NODE)				\
  tsubst_expr ((NODE), args, complain, in_decl,	\
	       integral_constant_expression_p)
  tree decl, init, cond, incr;

  init = TREE_VEC_ELT (OMP_FOR_INIT (t), i);
  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
  if (orig_declv && OMP_FOR_ORIG_DECLS (t))
    {
      tree o = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (t), i);
      TREE_VEC_ELT (orig_declv, i) = RECUR (o);
    }
  decl = TREE_OPERAND (init, 0);
  init = TREE_OPERAND (init, 1);
  tree decl_expr = NULL_TREE;
  if (init && TREE_CODE (init) == DECL_EXPR)
    {
      /* We need to jump through some hoops to handle declarations in the
	 init-statement, since we might need to handle auto deduction,
	 but we need to keep control of initialization.  */
      decl_expr = init;
      init = DECL_INITIAL (DECL_EXPR_DECL (init));
      decl = tsubst_decl (decl, args, complain);
    }
  else
    {
      if (TREE_CODE (decl) == SCOPE_REF)
	{
	  decl = RECUR (decl);
	  if (TREE_CODE (decl) == COMPONENT_REF)
	    {
	      /* Walk down through wrappers looking for the implicit
		 `this' PARM_DECL; if found, privatize the member.  */
	      tree v = decl;
	      while (v)
		switch (TREE_CODE (v))
		  {
		  case COMPONENT_REF:
		  case MEM_REF:
		  case INDIRECT_REF:
		  CASE_CONVERT:
		  case POINTER_PLUS_EXPR:
		    v = TREE_OPERAND (v, 0);
		    continue;
		  case PARM_DECL:
		    if (DECL_CONTEXT (v) == current_function_decl
			&& DECL_ARTIFICIAL (v)
			&& DECL_NAME (v) == this_identifier)
		      {
			decl = TREE_OPERAND (decl, 1);
			decl = omp_privatize_field (decl, false);
		      }
		    /* FALLTHRU */
		  default:
		    v = NULL_TREE;
		    break;
		  }
	    }
	}
      else
	decl = RECUR (decl);
    }
  init = RECUR (init);

  /* Deduce `auto' iterators from the substituted initializer.  */
  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  if (auto_node && init)
    TREE_TYPE (decl)
      = do_auto_deduction (TREE_TYPE (decl), init, auto_node, complain);

  gcc_assert (!type_dependent_expression_p (decl));

  if (!CLASS_TYPE_P (TREE_TYPE (decl)))
    {
      /* Scalar iterator: substitute the pieces directly into the
	 TREE_VECs and return.  */
      if (decl_expr)
	{
	  /* Declare the variable, but don't let that initialize it.  */
	  tree init_sav = DECL_INITIAL (DECL_EXPR_DECL (decl_expr));
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = NULL_TREE;
	  RECUR (decl_expr);
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = init_sav;
	}

      cond = RECUR (TREE_VEC_ELT (OMP_FOR_COND (t), i));
      incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs = RECUR (TREE_OPERAND (incr, 1));
	  incr = build_x_modify_expr (EXPR_LOCATION (incr), lhs,
				      NOP_EXPR, rhs, complain);
	}
      else
	incr = RECUR (incr);
      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      return;
    }

  /* Class-type iterator.  */
  if (decl_expr)
    {
      /* Declare and initialize the variable.  */
      RECUR (decl_expr);
      init = NULL_TREE;
    }
  else if (init)
    {
      /* Look for an existing data-sharing clause for DECL, first in the
	 combined parallel clauses (j == 0, if present), then in the for
	 clauses (j == 1).  */
      tree *pc;
      int j;
      for (j = (omp_parallel_combined_clauses == NULL ? 1 : 0); j < 2; j++)
	{
	  for (pc = j ? clauses : omp_parallel_combined_clauses; *pc; )
	    {
	      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE
		  && OMP_CLAUSE_DECL (*pc) == decl)
		break;
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  if (j)
		    break;
		  /* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
		  tree c = *pc;
		  *pc = OMP_CLAUSE_CHAIN (c);
		  OMP_CLAUSE_CHAIN (c) = *clauses;
		  *clauses = c;
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be firstprivate",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_REDUCTION
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be reduction",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else
		pc = &OMP_CLAUSE_CHAIN (*pc);
	    }
	  if (*pc)
	    break;
	}
      if (*pc == NULL_TREE)
	{
	  /* No clause mentions DECL: add an implicit private clause.  */
	  tree c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
	  OMP_CLAUSE_DECL (c) = decl;
	  c = finish_omp_clauses (c, C_ORT_OMP);
	  if (c)
	    {
	      OMP_CLAUSE_CHAIN (c) = *clauses;
	      *clauses = c;
	    }
	}
    }
  cond = TREE_VEC_ELT (OMP_FOR_COND (t), i);
  if (COMPARISON_CLASS_P (cond))
    {
      tree op0 = RECUR (TREE_OPERAND (cond, 0));
      tree op1 = RECUR (TREE_OPERAND (cond, 1));
      cond = build2 (TREE_CODE (cond), boolean_type_node, op0, op1);
    }
  else
    cond = RECUR (cond);
  incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
  /* Rebuild the increment expression, keeping the canonical forms
     finish_omp_for expects.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      incr = build2 (TREE_CODE (incr), TREE_TYPE (decl),
		     RECUR (TREE_OPERAND (incr, 0)), NULL_TREE);
      break;
    case MODIFY_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    case MODOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  /* v += n / v -= n is rebuilt as v = v +/- n.  */
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (TREE_OPERAND (incr, 1)),
				 TREE_TYPE (decl), lhs,
				 RECUR (TREE_OPERAND (incr, 2))));
	}
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == NOP_EXPR
	       && (TREE_CODE (TREE_OPERAND (incr, 2)) == PLUS_EXPR
		   || (TREE_CODE (TREE_OPERAND (incr, 2)) == MINUS_EXPR)))
	{
	  tree rhs = TREE_OPERAND (incr, 2);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    default:
      incr = RECUR (incr);
      break;
    }

  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
#undef RECUR
}

/* Helper function of tsubst_expr, find OMP_TEAMS inside
   of OMP_TARGET's body.  */

static tree
tsubst_find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  /* Only descend into BIND_EXPR/STATEMENT_LIST wrappers; stop at the
     first OMP_TEAMS found.  */
  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_TEAMS:
      return *tp;
    case BIND_EXPR:
    case STATEMENT_LIST:
      *walk_subtrees = 1;
      break;
    default:
      break;
    }
  return NULL_TREE;
}

/* Helper function for tsubst_expr.  For decomposition declaration
   artificial base DECL, which is tsubsted PATTERN_DECL, tsubst
   also the corresponding decls representing the identifiers
   of the decomposition declaration.  Return DECL if successful
   or error_mark_node otherwise, set *FIRST to the first decl
   in the list chained through DECL_CHAIN and *CNT to the number
   of such decls.
*/

static tree
tsubst_decomp_names (tree decl, tree pattern_decl, tree args,
		     tsubst_flags_t complain, tree in_decl, tree *first,
		     unsigned int *cnt)
{
  tree decl2, decl3, prev = decl;
  *cnt = 0;
  /* The artificial base of a decomposition declaration is unnamed.  */
  gcc_assert (DECL_NAME (decl) == NULL_TREE);
  /* Walk the named decomposition decls chained after PATTERN_DECL.  */
  for (decl2 = DECL_CHAIN (pattern_decl);
       decl2
       && VAR_P (decl2)
       && DECL_DECOMPOSITION_P (decl2)
       && DECL_NAME (decl2);
       decl2 = DECL_CHAIN (decl2))
    {
      if (TREE_TYPE (decl2) == error_mark_node && *cnt == 0)
	{
	  gcc_assert (errorcount);
	  return error_mark_node;
	}
      (*cnt)++;
      gcc_assert (DECL_DECOMP_BASE (decl2) == pattern_decl);
      gcc_assert (DECL_HAS_VALUE_EXPR_P (decl2));
      /* Temporarily clear the value-expr on the pattern decl while
	 substituting it, then restore it afterwards.  */
      tree v = DECL_VALUE_EXPR (decl2);
      DECL_HAS_VALUE_EXPR_P (decl2) = 0;
      SET_DECL_VALUE_EXPR (decl2, NULL_TREE);
      decl3 = tsubst (decl2, args, complain, in_decl);
      SET_DECL_VALUE_EXPR (decl2, v);
      DECL_HAS_VALUE_EXPR_P (decl2) = 1;
      if (VAR_P (decl3))
	/* Mark instantiated so instantiate_decl won't try again.  */
	DECL_TEMPLATE_INSTANTIATED (decl3) = 1;
      else
	{
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	  continue;
	}
      maybe_push_decl (decl3);
      if (error_operand_p (decl3))
	decl = error_mark_node;
      else if (decl != error_mark_node
	       && DECL_CHAIN (decl3) != prev
	       && decl != prev)
	{
	  /* The substituted decls must stay chained in order; anything
	     else only happens after earlier errors.  */
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	}
      else
	prev = decl3;
    }
  *first = prev;
  return decl;
}

/* Like tsubst_copy for expressions, etc. but also does semantic
   processing.
*/ tree tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl, bool integral_constant_expression_p) { #define RETURN(EXP) do { r = (EXP); goto out; } while(0) #define RECUR(NODE) \ tsubst_expr ((NODE), args, complain, in_decl, \ integral_constant_expression_p) tree stmt, tmp; tree r; location_t loc; if (t == NULL_TREE || t == error_mark_node) return t; loc = input_location; if (EXPR_HAS_LOCATION (t)) input_location = EXPR_LOCATION (t); if (STATEMENT_CODE_P (TREE_CODE (t))) current_stmt_tree ()->stmts_are_full_exprs_p = STMT_IS_FULL_EXPR_P (t); switch (TREE_CODE (t)) { case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i)) RECUR (tsi_stmt (i)); break; } case CTOR_INITIALIZER: finish_mem_initializers (tsubst_initializer_list (TREE_OPERAND (t, 0), args)); break; case RETURN_EXPR: finish_return_stmt (RECUR (TREE_OPERAND (t, 0))); break; case EXPR_STMT: tmp = RECUR (EXPR_STMT_EXPR (t)); if (EXPR_STMT_STMT_EXPR_RESULT (t)) finish_stmt_expr_expr (tmp, cur_stmt_expr); else finish_expr_stmt (tmp); break; case USING_STMT: finish_local_using_directive (USING_STMT_NAMESPACE (t), /*attribs=*/NULL_TREE); break; case DECL_EXPR: { tree decl, pattern_decl; tree init; pattern_decl = decl = DECL_EXPR_DECL (t); if (TREE_CODE (decl) == LABEL_DECL) finish_label_decl (DECL_NAME (decl)); else if (TREE_CODE (decl) == USING_DECL) { tree scope = USING_DECL_SCOPE (decl); tree name = DECL_NAME (decl); scope = tsubst (scope, args, complain, in_decl); decl = lookup_qualified_name (scope, name, /*is_type_p=*/false, /*complain=*/false); if (decl == error_mark_node || TREE_CODE (decl) == TREE_LIST) qualified_name_lookup_error (scope, name, decl, input_location); else finish_local_using_decl (decl, scope, name); } else if (is_capture_proxy (decl) && !DECL_TEMPLATE_INSTANTIATION (current_function_decl)) { /* We're in tsubst_lambda_expr, we've already inserted a new capture proxy, so look it up and register it. 
*/ tree inst; if (DECL_PACK_P (decl)) { inst = (retrieve_local_specialization (DECL_CAPTURED_VARIABLE (decl))); gcc_assert (TREE_CODE (inst) == NONTYPE_ARGUMENT_PACK); } else { inst = lookup_name_real (DECL_NAME (decl), 0, 0, /*block_p=*/true, 0, LOOKUP_HIDDEN); gcc_assert (inst != decl && is_capture_proxy (inst)); } register_local_specialization (inst, decl); break; } else if (DECL_IMPLICIT_TYPEDEF_P (decl) && LAMBDA_TYPE_P (TREE_TYPE (decl))) /* Don't copy the old closure; we'll create a new one in tsubst_lambda_expr. */ break; else { init = DECL_INITIAL (decl); decl = tsubst (decl, args, complain, in_decl); if (decl != error_mark_node) { /* By marking the declaration as instantiated, we avoid trying to instantiate it. Since instantiate_decl can't handle local variables, and since we've already done all that needs to be done, that's the right thing to do. */ if (VAR_P (decl)) DECL_TEMPLATE_INSTANTIATED (decl) = 1; if (VAR_P (decl) && ANON_AGGR_TYPE_P (TREE_TYPE (decl))) /* Anonymous aggregates are a special case. */ finish_anon_union (decl); else if (is_capture_proxy (DECL_EXPR_DECL (t))) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl) == this_identifier) { tree lam = DECL_CONTEXT (current_function_decl); lam = CLASSTYPE_LAMBDA_EXPR (lam); LAMBDA_EXPR_THIS_CAPTURE (lam) = decl; } insert_capture_proxy (decl); } else if (DECL_IMPLICIT_TYPEDEF_P (t)) /* We already did a pushtag. */; else if (TREE_CODE (decl) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (decl) && DECL_FUNCTION_SCOPE_P (pattern_decl)) { DECL_CONTEXT (decl) = NULL_TREE; pushdecl (decl); DECL_CONTEXT (decl) = current_function_decl; cp_check_omp_declare_reduction (decl); } else { int const_init = false; maybe_push_decl (decl); if (VAR_P (decl) && DECL_PRETTY_FUNCTION_P (decl)) { /* For __PRETTY_FUNCTION__ we have to adjust the initializer. 
*/ const char *const name = cxx_printable_name (current_function_decl, 2); init = cp_fname_init (name, &TREE_TYPE (decl)); } else init = tsubst_init (init, decl, args, complain, in_decl); if (VAR_P (decl)) const_init = (DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (pattern_decl)); if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl) && TREE_TYPE (pattern_decl) != error_mark_node) { unsigned int cnt; tree first; tree ndecl = tsubst_decomp_names (decl, pattern_decl, args, complain, in_decl, &first, &cnt); if (ndecl != error_mark_node) cp_maybe_mangle_decomp (ndecl, first, cnt); cp_finish_decl (decl, init, const_init, NULL_TREE, 0); if (ndecl != error_mark_node) cp_finish_decomp (ndecl, first, cnt); } else cp_finish_decl (decl, init, const_init, NULL_TREE, 0); } } } break; } case FOR_STMT: stmt = begin_for_stmt (NULL_TREE, NULL_TREE); RECUR (FOR_INIT_STMT (t)); finish_init_stmt (stmt); tmp = RECUR (FOR_COND (t)); finish_for_cond (tmp, stmt, false, 0); tmp = RECUR (FOR_EXPR (t)); finish_for_expr (tmp, stmt); { bool prev = note_iteration_stmt_body_start (); RECUR (FOR_BODY (t)); note_iteration_stmt_body_end (prev); } finish_for_stmt (stmt); break; case RANGE_FOR_STMT: { /* Construct another range_for, if this is not a final substitution (for inside inside a generic lambda of a template). Otherwise convert to a regular for. */ tree decl, expr; stmt = (processing_template_decl ? 
begin_range_for_stmt (NULL_TREE, NULL_TREE) : begin_for_stmt (NULL_TREE, NULL_TREE)); decl = RANGE_FOR_DECL (t); decl = tsubst (decl, args, complain, in_decl); maybe_push_decl (decl); expr = RECUR (RANGE_FOR_EXPR (t)); tree decomp_first = NULL_TREE; unsigned decomp_cnt = 0; if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl)) decl = tsubst_decomp_names (decl, RANGE_FOR_DECL (t), args, complain, in_decl, &decomp_first, &decomp_cnt); if (processing_template_decl) { RANGE_FOR_IVDEP (stmt) = RANGE_FOR_IVDEP (t); RANGE_FOR_UNROLL (stmt) = RANGE_FOR_UNROLL (t); finish_range_for_decl (stmt, decl, expr); } else { unsigned short unroll = (RANGE_FOR_UNROLL (t) ? tree_to_uhwi (RANGE_FOR_UNROLL (t)) : 0); stmt = cp_convert_range_for (stmt, decl, expr, decomp_first, decomp_cnt, RANGE_FOR_IVDEP (t), unroll); } bool prev = note_iteration_stmt_body_start (); RECUR (RANGE_FOR_BODY (t)); note_iteration_stmt_body_end (prev); finish_for_stmt (stmt); } break; case WHILE_STMT: stmt = begin_while_stmt (); tmp = RECUR (WHILE_COND (t)); finish_while_stmt_cond (tmp, stmt, false, 0); { bool prev = note_iteration_stmt_body_start (); RECUR (WHILE_BODY (t)); note_iteration_stmt_body_end (prev); } finish_while_stmt (stmt); break; case DO_STMT: stmt = begin_do_stmt (); { bool prev = note_iteration_stmt_body_start (); RECUR (DO_BODY (t)); note_iteration_stmt_body_end (prev); } finish_do_body (stmt); tmp = RECUR (DO_COND (t)); finish_do_stmt (tmp, stmt, false, 0); break; case IF_STMT: stmt = begin_if_stmt (); IF_STMT_CONSTEXPR_P (stmt) = IF_STMT_CONSTEXPR_P (t); if (IF_STMT_CONSTEXPR_P (t)) args = add_extra_args (IF_STMT_EXTRA_ARGS (t), args); tmp = RECUR (IF_COND (t)); tmp = finish_if_stmt_cond (tmp, stmt); if (IF_STMT_CONSTEXPR_P (t) && instantiation_dependent_expression_p (tmp)) { /* We're partially instantiating a generic lambda, but the condition of the constexpr if is still dependent. Don't substitute into the branches now, just remember the template arguments. 
*/ do_poplevel (IF_SCOPE (stmt)); IF_COND (stmt) = IF_COND (t); THEN_CLAUSE (stmt) = THEN_CLAUSE (t); ELSE_CLAUSE (stmt) = ELSE_CLAUSE (t); IF_STMT_EXTRA_ARGS (stmt) = build_extra_args (t, args, complain); add_stmt (stmt); break; } if (IF_STMT_CONSTEXPR_P (t) && integer_zerop (tmp)) /* Don't instantiate the THEN_CLAUSE. */; else { bool inhibit = integer_zerop (fold_non_dependent_expr (tmp)); if (inhibit) ++c_inhibit_evaluation_warnings; RECUR (THEN_CLAUSE (t)); if (inhibit) --c_inhibit_evaluation_warnings; } finish_then_clause (stmt); if (IF_STMT_CONSTEXPR_P (t) && integer_nonzerop (tmp)) /* Don't instantiate the ELSE_CLAUSE. */; else if (ELSE_CLAUSE (t)) { bool inhibit = integer_nonzerop (fold_non_dependent_expr (tmp)); begin_else_clause (stmt); if (inhibit) ++c_inhibit_evaluation_warnings; RECUR (ELSE_CLAUSE (t)); if (inhibit) --c_inhibit_evaluation_warnings; finish_else_clause (stmt); } finish_if_stmt (stmt); break; case BIND_EXPR: if (BIND_EXPR_BODY_BLOCK (t)) stmt = begin_function_body (); else stmt = begin_compound_stmt (BIND_EXPR_TRY_BLOCK (t) ? 
BCS_TRY_BLOCK : 0); RECUR (BIND_EXPR_BODY (t)); if (BIND_EXPR_BODY_BLOCK (t)) finish_function_body (stmt); else finish_compound_stmt (stmt); break; case BREAK_STMT: finish_break_stmt (); break; case CONTINUE_STMT: finish_continue_stmt (); break; case SWITCH_STMT: stmt = begin_switch_stmt (); tmp = RECUR (SWITCH_STMT_COND (t)); finish_switch_cond (tmp, stmt); RECUR (SWITCH_STMT_BODY (t)); finish_switch_stmt (stmt); break; case CASE_LABEL_EXPR: { tree low = RECUR (CASE_LOW (t)); tree high = RECUR (CASE_HIGH (t)); tree l = finish_case_label (EXPR_LOCATION (t), low, high); if (l && TREE_CODE (l) == CASE_LABEL_EXPR) FALLTHROUGH_LABEL_P (CASE_LABEL (l)) = FALLTHROUGH_LABEL_P (CASE_LABEL (t)); } break; case LABEL_EXPR: { tree decl = LABEL_EXPR_LABEL (t); tree label; label = finish_label_stmt (DECL_NAME (decl)); if (TREE_CODE (label) == LABEL_DECL) FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl); if (DECL_ATTRIBUTES (decl) != NULL_TREE) cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0); } break; case GOTO_EXPR: tmp = GOTO_DESTINATION (t); if (TREE_CODE (tmp) != LABEL_DECL) /* Computed goto's must be tsubst'd into. On the other hand, non-computed gotos must not be; the identifier in question will have no binding. 
*/ tmp = RECUR (tmp); else tmp = DECL_NAME (tmp); finish_goto_stmt (tmp); break; case ASM_EXPR: { tree string = RECUR (ASM_STRING (t)); tree outputs = tsubst_copy_asm_operands (ASM_OUTPUTS (t), args, complain, in_decl); tree inputs = tsubst_copy_asm_operands (ASM_INPUTS (t), args, complain, in_decl); tree clobbers = tsubst_copy_asm_operands (ASM_CLOBBERS (t), args, complain, in_decl); tree labels = tsubst_copy_asm_operands (ASM_LABELS (t), args, complain, in_decl); tmp = finish_asm_stmt (ASM_VOLATILE_P (t), string, outputs, inputs, clobbers, labels); tree asm_expr = tmp; if (TREE_CODE (asm_expr) == CLEANUP_POINT_EXPR) asm_expr = TREE_OPERAND (asm_expr, 0); ASM_INPUT_P (asm_expr) = ASM_INPUT_P (t); } break; case TRY_BLOCK: if (CLEANUP_P (t)) { stmt = begin_try_block (); RECUR (TRY_STMTS (t)); finish_cleanup_try_block (stmt); finish_cleanup (RECUR (TRY_HANDLERS (t)), stmt); } else { tree compound_stmt = NULL_TREE; if (FN_TRY_BLOCK_P (t)) stmt = begin_function_try_block (&compound_stmt); else stmt = begin_try_block (); RECUR (TRY_STMTS (t)); if (FN_TRY_BLOCK_P (t)) finish_function_try_block (stmt); else finish_try_block (stmt); RECUR (TRY_HANDLERS (t)); if (FN_TRY_BLOCK_P (t)) finish_function_handler_sequence (stmt, compound_stmt); else finish_handler_sequence (stmt); } break; case HANDLER: { tree decl = HANDLER_PARMS (t); if (decl) { decl = tsubst (decl, args, complain, in_decl); /* Prevent instantiate_decl from trying to instantiate this variable. We've already done all that needs to be done. */ if (decl != error_mark_node) DECL_TEMPLATE_INSTANTIATED (decl) = 1; } stmt = begin_handler (); finish_handler_parms (decl, stmt); RECUR (HANDLER_BODY (t)); finish_handler (stmt); } break; case TAG_DEFN: tmp = tsubst (TREE_TYPE (t), args, complain, NULL_TREE); if (CLASS_TYPE_P (tmp)) { /* Local classes are not independent templates; they are instantiated along with their containing function. 
And this way we don't have to deal with pushing out of one local class to instantiate a member of another local class. */ /* Closures are handled by the LAMBDA_EXPR. */ gcc_assert (!LAMBDA_TYPE_P (TREE_TYPE (t))); complete_type (tmp); for (tree fld = TYPE_FIELDS (tmp); fld; fld = DECL_CHAIN (fld)) if ((VAR_P (fld) || (TREE_CODE (fld) == FUNCTION_DECL && !DECL_ARTIFICIAL (fld))) && DECL_TEMPLATE_INSTANTIATION (fld)) instantiate_decl (fld, /*defer_ok=*/false, /*expl_inst_class=*/false); } break; case STATIC_ASSERT: { tree condition; ++c_inhibit_evaluation_warnings; condition = tsubst_expr (STATIC_ASSERT_CONDITION (t), args, complain, in_decl, /*integral_constant_expression_p=*/true); --c_inhibit_evaluation_warnings; finish_static_assert (condition, STATIC_ASSERT_MESSAGE (t), STATIC_ASSERT_SOURCE_LOCATION (t), /*member_p=*/false); } break; case OACC_KERNELS: case OACC_PARALLEL: tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); stmt = begin_omp_parallel (); RECUR (OMP_BODY (t)); finish_omp_construct (TREE_CODE (t), stmt, tmp); break; case OMP_PARALLEL: r = push_omp_privatization_clauses (OMP_PARALLEL_COMBINED (t)); tmp = tsubst_omp_clauses (OMP_PARALLEL_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (OMP_PARALLEL_COMBINED (t)) omp_parallel_combined_clauses = &tmp; stmt = begin_omp_parallel (); RECUR (OMP_PARALLEL_BODY (t)); gcc_assert (omp_parallel_combined_clauses == NULL); OMP_PARALLEL_COMBINED (finish_omp_parallel (tmp, stmt)) = OMP_PARALLEL_COMBINED (t); pop_omp_privatization_clauses (r); break; case OMP_TASK: r = push_omp_privatization_clauses (false); tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = begin_omp_task (); RECUR (OMP_TASK_BODY (t)); finish_omp_task (tmp, stmt); pop_omp_privatization_clauses (r); break; case OMP_FOR: case OMP_SIMD: case OMP_DISTRIBUTE: case OMP_TASKLOOP: case OACC_LOOP: { tree clauses, body, pre_body; tree declv = NULL_TREE, initv = NULL_TREE, condv = 
NULL_TREE; tree orig_declv = NULL_TREE; tree incrv = NULL_TREE; enum c_omp_region_type ort = C_ORT_OMP; int i; if (TREE_CODE (t) == OACC_LOOP) ort = C_ORT_ACC; r = push_omp_privatization_clauses (OMP_FOR_INIT (t) == NULL_TREE); clauses = tsubst_omp_clauses (OMP_FOR_CLAUSES (t), ort, args, complain, in_decl); if (OMP_FOR_INIT (t) != NULL_TREE) { declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); if (OMP_FOR_ORIG_DECLS (t)) orig_declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); initv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); condv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); incrv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); } stmt = begin_omp_structured_block (); pre_body = push_stmt_list (); RECUR (OMP_FOR_PRE_BODY (t)); pre_body = pop_stmt_list (pre_body); if (OMP_FOR_INIT (t) != NULL_TREE) for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++) tsubst_omp_for_iterator (t, i, declv, orig_declv, initv, condv, incrv, &clauses, args, complain, in_decl, integral_constant_expression_p); omp_parallel_combined_clauses = NULL; body = push_stmt_list (); RECUR (OMP_FOR_BODY (t)); body = pop_stmt_list (body); if (OMP_FOR_INIT (t) != NULL_TREE) t = finish_omp_for (EXPR_LOCATION (t), TREE_CODE (t), declv, orig_declv, initv, condv, incrv, body, pre_body, NULL, clauses); else { t = make_node (TREE_CODE (t)); TREE_TYPE (t) = void_type_node; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; OMP_FOR_CLAUSES (t) = clauses; SET_EXPR_LOCATION (t, EXPR_LOCATION (t)); add_stmt (t); } add_stmt (finish_omp_structured_block (stmt)); pop_omp_privatization_clauses (r); } break; case OMP_SECTIONS: omp_parallel_combined_clauses = NULL; /* FALLTHRU */ case OMP_SINGLE: case OMP_TEAMS: case OMP_CRITICAL: r = push_omp_privatization_clauses (TREE_CODE (t) == OMP_TEAMS && OMP_TEAMS_COMBINED (t)); tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list 
(stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_CLAUSES (t) = tmp; add_stmt (t); pop_omp_privatization_clauses (r); break; case OACC_DATA: case OMP_TARGET_DATA: case OMP_TARGET: tmp = tsubst_omp_clauses (OMP_CLAUSES (t), (TREE_CODE (t) == OACC_DATA) ? C_ORT_ACC : C_ORT_OMP, args, complain, in_decl); keep_next_level (true); stmt = begin_omp_structured_block (); RECUR (OMP_BODY (t)); stmt = finish_omp_structured_block (stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_CLAUSES (t) = tmp; if (TREE_CODE (t) == OMP_TARGET && OMP_TARGET_COMBINED (t)) { tree teams = cp_walk_tree (&stmt, tsubst_find_omp_teams, NULL, NULL); if (teams) { /* For combined target teams, ensure the num_teams and thread_limit clause expressions are evaluated on the host, before entering the target construct. */ tree c; for (c = OMP_TEAMS_CLAUSES (teams); c; c = OMP_CLAUSE_CHAIN (c)) if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT) && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST) { tree expr = OMP_CLAUSE_OPERAND (c, 0); expr = force_target_expr (TREE_TYPE (expr), expr, tf_none); if (expr == error_mark_node) continue; tmp = TARGET_EXPR_SLOT (expr); add_stmt (expr); OMP_CLAUSE_OPERAND (c, 0) = expr; tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (tc) = tmp; OMP_CLAUSE_CHAIN (tc) = OMP_TARGET_CLAUSES (t); OMP_TARGET_CLAUSES (t) = tc; } } } add_stmt (t); break; case OACC_DECLARE: t = copy_node (t); tmp = tsubst_omp_clauses (OACC_DECLARE_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); OACC_DECLARE_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_TARGET_UPDATE: case OMP_TARGET_ENTER_DATA: case OMP_TARGET_EXIT_DATA: tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); t = copy_node (t); OMP_STANDALONE_CLAUSES (t) = tmp; add_stmt (t); break; case OACC_ENTER_DATA: case OACC_EXIT_DATA: case OACC_UPDATE: tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), 
C_ORT_ACC, args, complain, in_decl); t = copy_node (t); OMP_STANDALONE_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_ORDERED: tmp = tsubst_omp_clauses (OMP_ORDERED_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_ORDERED_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_SECTION: case OMP_MASTER: case OMP_TASKGROUP: stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); t = copy_node (t); OMP_BODY (t) = stmt; add_stmt (t); break; case OMP_ATOMIC: gcc_assert (OMP_ATOMIC_DEPENDENT_P (t)); if (TREE_CODE (TREE_OPERAND (t, 1)) != MODIFY_EXPR) { tree op1 = TREE_OPERAND (t, 1); tree rhs1 = NULL_TREE; tree lhs, rhs; if (TREE_CODE (op1) == COMPOUND_EXPR) { rhs1 = RECUR (TREE_OPERAND (op1, 0)); op1 = TREE_OPERAND (op1, 1); } lhs = RECUR (TREE_OPERAND (op1, 0)); rhs = RECUR (TREE_OPERAND (op1, 1)); finish_omp_atomic (OMP_ATOMIC, TREE_CODE (op1), lhs, rhs, NULL_TREE, NULL_TREE, rhs1, OMP_ATOMIC_SEQ_CST (t)); } else { tree op1 = TREE_OPERAND (t, 1); tree v = NULL_TREE, lhs, rhs = NULL_TREE, lhs1 = NULL_TREE; tree rhs1 = NULL_TREE; enum tree_code code = TREE_CODE (TREE_OPERAND (op1, 1)); enum tree_code opcode = NOP_EXPR; if (code == OMP_ATOMIC_READ) { v = RECUR (TREE_OPERAND (op1, 0)); lhs = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0)); } else if (code == OMP_ATOMIC_CAPTURE_OLD || code == OMP_ATOMIC_CAPTURE_NEW) { tree op11 = TREE_OPERAND (TREE_OPERAND (op1, 1), 1); v = RECUR (TREE_OPERAND (op1, 0)); lhs1 = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0)); if (TREE_CODE (op11) == COMPOUND_EXPR) { rhs1 = RECUR (TREE_OPERAND (op11, 0)); op11 = TREE_OPERAND (op11, 1); } lhs = RECUR (TREE_OPERAND (op11, 0)); rhs = RECUR (TREE_OPERAND (op11, 1)); opcode = TREE_CODE (op11); if (opcode == MODIFY_EXPR) opcode = NOP_EXPR; } else { code = OMP_ATOMIC; lhs = RECUR (TREE_OPERAND (op1, 0)); rhs = RECUR (TREE_OPERAND (op1, 1)); } finish_omp_atomic 
(code, opcode, lhs, rhs, v, lhs1, rhs1, OMP_ATOMIC_SEQ_CST (t)); } break; case TRANSACTION_EXPR: { int flags = 0; flags |= (TRANSACTION_EXPR_OUTER (t) ? TM_STMT_ATTR_OUTER : 0); flags |= (TRANSACTION_EXPR_RELAXED (t) ? TM_STMT_ATTR_RELAXED : 0); if (TRANSACTION_EXPR_IS_STMT (t)) { tree body = TRANSACTION_EXPR_BODY (t); tree noex = NULL_TREE; if (TREE_CODE (body) == MUST_NOT_THROW_EXPR) { noex = MUST_NOT_THROW_COND (body); if (noex == NULL_TREE) noex = boolean_true_node; body = TREE_OPERAND (body, 0); } stmt = begin_transaction_stmt (input_location, NULL, flags); RECUR (body); finish_transaction_stmt (stmt, NULL, flags, RECUR (noex)); } else { stmt = build_transaction_expr (EXPR_LOCATION (t), RECUR (TRANSACTION_EXPR_BODY (t)), flags, NULL_TREE); RETURN (stmt); } } break; case MUST_NOT_THROW_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree cond = RECUR (MUST_NOT_THROW_COND (t)); RETURN (build_must_not_throw_expr (op0, cond)); } case EXPR_PACK_EXPANSION: error ("invalid use of pack expansion expression"); RETURN (error_mark_node); case NONTYPE_ARGUMENT_PACK: error ("use %<...%> to expand argument pack"); RETURN (error_mark_node); case COMPOUND_EXPR: tmp = RECUR (TREE_OPERAND (t, 0)); if (tmp == NULL_TREE) /* If the first operand was a statement, we're done with it. */ RETURN (RECUR (TREE_OPERAND (t, 1))); RETURN (build_x_compound_expr (EXPR_LOCATION (t), tmp, RECUR (TREE_OPERAND (t, 1)), complain)); case ANNOTATE_EXPR: tmp = RECUR (TREE_OPERAND (t, 0)); RETURN (build3_loc (EXPR_LOCATION (t), ANNOTATE_EXPR, TREE_TYPE (tmp), tmp, RECUR (TREE_OPERAND (t, 1)), RECUR (TREE_OPERAND (t, 2)))); default: gcc_assert (!STATEMENT_CODE_P (TREE_CODE (t))); RETURN (tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false, integral_constant_expression_p)); } RETURN (NULL_TREE); out: input_location = loc; return r; #undef RECUR #undef RETURN } /* Instantiate the special body of the artificial DECL_OMP_DECLARE_REDUCTION function. 
   For description of the body see comment above
   cp_parser_omp_declare_reduction_exprs.  */

/* Substitute template ARGS into the canned statement list T that forms the
   body of an artificial OpenMP declare-reduction function.  The list holds
   up to 7 statements: stmts[0]/stmts[1] are DECL_EXPRs for omp_out/omp_in
   and stmts[2] is the combiner; stmts[3]/stmts[4] are DECL_EXPRs for
   omp_priv/omp_orig and stmts[5] is the initializer (layout asserted
   below; the meaning of a 7th statement is only visible here as an extra
   add_decl_expr of omp_orig).  COMPLAIN and IN_DECL are the usual tsubst
   diagnostics context.  Emits the substituted decls and blocks into the
   current function; returns nothing.  */

static void
tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  if (t == NULL_TREE || t == error_mark_node)
    return;

  gcc_assert (TREE_CODE (t) == STATEMENT_LIST);

  /* Collect at most 7 statements; the list must not be longer.  */
  tree_stmt_iterator tsi;
  int i;
  tree stmts[7];
  memset (stmts, 0, sizeof stmts);
  for (i = 0, tsi = tsi_start (t);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    stmts[i] = tsi_stmt (tsi);
  gcc_assert (tsi_end_p (tsi));

  /* At least 3 statements: omp_out decl, omp_in decl, combiner.  */
  if (i >= 3)
    {
      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
		  && TREE_CODE (stmts[1]) == DECL_EXPR);
      tree omp_out
	= tsubst (DECL_EXPR_DECL (stmts[0]), args, complain, in_decl);
      tree omp_in
	= tsubst (DECL_EXPR_DECL (stmts[1]), args, complain, in_decl);
      /* The substituted decls belong to the function being built.  */
      DECL_CONTEXT (omp_out) = current_function_decl;
      DECL_CONTEXT (omp_in) = current_function_decl;
      keep_next_level (true);
      tree block = begin_omp_structured_block ();
      /* Substitute the combiner statement inside its own block.  */
      tsubst_expr (stmts[2], args, complain, in_decl, false);
      block = finish_omp_structured_block (block);
      block = maybe_cleanup_point_expr_void (block);
      add_decl_expr (omp_out);
      /* Propagate suppressed warnings from the template decl.  */
      if (TREE_NO_WARNING (DECL_EXPR_DECL (stmts[0])))
	TREE_NO_WARNING (omp_out) = 1;
      add_decl_expr (omp_in);
      finish_expr_stmt (block);
    }

  /* At least 6 statements: also omp_priv decl, omp_orig decl,
     initializer.  */
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
		  && TREE_CODE (stmts[4]) == DECL_EXPR);
      tree omp_priv
	= tsubst (DECL_EXPR_DECL (stmts[3]), args, complain, in_decl);
      tree omp_orig
	= tsubst (DECL_EXPR_DECL (stmts[4]), args, complain, in_decl);
      DECL_CONTEXT (omp_priv) = current_function_decl;
      DECL_CONTEXT (omp_orig) = current_function_decl;
      keep_next_level (true);
      tree block = begin_omp_structured_block ();
      /* Substitute the initializer statement inside its own block.  */
      tsubst_expr (stmts[5], args, complain, in_decl, false);
      block = finish_omp_structured_block (block);
      block = maybe_cleanup_point_expr_void (block);
      /* Strip the cleanup for omp_priv added during parsing; presumably
	 the runtime is responsible for it here.  */
      cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL);
      add_decl_expr (omp_priv);
      add_decl_expr (omp_orig);
      finish_expr_stmt (block);
      if (i == 7)
	add_decl_expr (omp_orig);
    }
}

/* T is a postfix-expression
   that is not being used in a function call.  Return the substituted
   version of T.  */

/* Dispatch substitution of a postfix-expression T (outside a call
   context): qualified-ids get tsubst_qualified_id, everything else goes
   through tsubst_copy_and_build with function_p false.  */

static tree
tsubst_non_call_postfix_expression (tree t, tree args,
				    tsubst_flags_t complain, tree in_decl)
{
  if (TREE_CODE (t) == SCOPE_REF)
    t = tsubst_qualified_id (t, args, complain, in_decl,
			     /*done=*/false, /*address_p=*/false);
  else
    t = tsubst_copy_and_build (t, args, complain, in_decl,
			       /*function_p=*/false,
			       /*integral_constant_expression_p=*/false);
  return t;
}

/* T is a LAMBDA_EXPR.  Generate a new LAMBDA_EXPR for the current
   instantiation context.

   Instantiating a pack expansion containing a lambda might result in
   multiple lambdas all based on the same lambda in the template.  */

tree
tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree oldfn = lambda_function (t);
  in_decl = oldfn;

  /* Build the new LAMBDA_EXPR and copy over the template's capture
     mode, mutability and location.  */
  tree r = build_lambda_expr ();
  LAMBDA_EXPR_LOCATION (r) = LAMBDA_EXPR_LOCATION (t);
  LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (r)
    = LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (t);
  LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t);

  if (LAMBDA_EXPR_EXTRA_SCOPE (t) == NULL_TREE)
    LAMBDA_EXPR_EXTRA_SCOPE (r) = NULL_TREE;
  else
    record_lambda_scope (r);

  gcc_assert (LAMBDA_EXPR_THIS_CAPTURE (t) == NULL_TREE
	      && LAMBDA_EXPR_PENDING_PROXIES (t) == NULL);

  /* Substitute each capture (field, initializer) pair.  A pack-expansion
     capture substitutes to a TREE_VEC of fields/inits that are spliced in
     element by element.  The list is built in reverse and fixed up
     below.  */
  for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t); cap; cap = TREE_CHAIN (cap))
    {
      tree field = TREE_PURPOSE (cap);
      if (PACK_EXPANSION_P (field))
	field = PACK_EXPANSION_PATTERN (field);
      field = tsubst_decl (field, args, complain);
      if (field == error_mark_node)
	return error_mark_node;
      tree init = TREE_VALUE (cap);
      if (PACK_EXPANSION_P (init))
	init = tsubst_pack_expansion (init, args, complain, in_decl);
      else
	init = tsubst_copy_and_build (init, args, complain, in_decl,
				      /*fn*/false, /*constexpr*/false);

      if (TREE_CODE (field) == TREE_VEC)
	{
	  int len = TREE_VEC_LENGTH (field);
	  gcc_assert (TREE_CODE (init) == TREE_VEC
		      && TREE_VEC_LENGTH (init) == len);
	  for (int i = 0; i < len; ++i)
	    LAMBDA_EXPR_CAPTURE_LIST (r)
	      = tree_cons (TREE_VEC_ELT (field, i),
			   TREE_VEC_ELT (init, i),
			   LAMBDA_EXPR_CAPTURE_LIST (r));
	}
      else
	{
	  LAMBDA_EXPR_CAPTURE_LIST (r)
	    = tree_cons (field, init, LAMBDA_EXPR_CAPTURE_LIST (r));
	  /* Recognize the capture of the enclosing object.  */
	  if (id_equal (DECL_NAME (field), "__this"))
	    LAMBDA_EXPR_THIS_CAPTURE (r) = field;
	}
    }

  tree type = begin_lambda_type (r);
  if (type == error_mark_node)
    return error_mark_node;

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (r));

  /* For a generic lambda the op() is itself a template.  */
  tree oldtmpl = (generic_lambda_fn_p (oldfn)
		  ? DECL_TI_TEMPLATE (oldfn)
		  : NULL_TREE);

  tree fntype = static_fn_type (oldfn);
  /* Substituting the type of a generic lambda's op() must happen with
     processing_template_decl raised so auto parms stay dependent.  */
  if (oldtmpl)
    ++processing_template_decl;
  fntype = tsubst (fntype, args, complain, in_decl);
  if (oldtmpl)
    --processing_template_decl;

  if (fntype == error_mark_node)
    r = error_mark_node;
  else
    {
      /* Fix the type of 'this'.  */
      fntype = build_memfn_type (fntype, type,
				 type_memfn_quals (fntype),
				 type_memfn_rqual (fntype));
      tree fn, tmpl;
      if (oldtmpl)
	{
	  tmpl = tsubst_template_decl (oldtmpl, args, complain, fntype);
	  fn = DECL_TEMPLATE_RESULT (tmpl);
	  finish_member_declaration (tmpl);
	}
      else
	{
	  tmpl = NULL_TREE;
	  fn = tsubst_function_decl (oldfn, args, complain, fntype);
	  finish_member_declaration (fn);
	}

      /* Let finish_function set this.  */
      DECL_DECLARED_CONSTEXPR_P (fn) = false;

      /* Enter the op() body's function context, or at least keep the GC
	 from collecting mid-expression.  */
      bool nested = cfun;
      if (nested)
	push_function_context ();
      else
	/* Still increment function_depth so that we don't GC in the
	   middle of an expression.  */
	++function_depth;

      local_specialization_stack s (lss_copy);

      tree body = start_lambda_function (fn, r);

      register_parameter_specializations (oldfn, fn);

      /* Substitute the saved body of the template lambda's op().  */
      tsubst_expr (DECL_SAVED_TREE (oldfn), args, complain, r,
		   /*constexpr*/false);

      finish_lambda_function (body);

      if (nested)
	pop_function_context ();
      else
	--function_depth;

      /* The capture list was built up in reverse order; fix that now.  */
      LAMBDA_EXPR_CAPTURE_LIST (r)
	= nreverse (LAMBDA_EXPR_CAPTURE_LIST (r));

      LAMBDA_EXPR_THIS_CAPTURE (r) = NULL_TREE;

      maybe_add_lambda_conv_op (type);
    }

  finish_struct (type, /*attr*/NULL_TREE);

  insert_pending_capture_proxies ();

  return r;
}

/* Like tsubst but deals with expressions and performs semantic analysis.
   FUNCTION_P is true if T is the "F" in "F (ARGS)".  */

tree
tsubst_copy_and_build (tree t,
		       tree args,
		       tsubst_flags_t complain,
		       tree in_decl,
		       bool function_p,
		       bool integral_constant_expression_p)
{
#define RETURN(EXP) do { retval = (EXP); goto out; } while(0)
#define RECUR(NODE)						\
  tsubst_copy_and_build (NODE, args, complain, in_decl,		\
			 /*function_p=*/false,			\
			 integral_constant_expression_p)

  tree retval, op1;
  location_t loc;

  if (t == NULL_TREE || t == error_mark_node)
    return t;

  /* Temporarily point diagnostics at the expression being substituted;
     restored at "out".  */
  loc = input_location;
  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* N3276 decltype magic only applies to calls at the top level or on the
     right side of a comma.  */
  tsubst_flags_t decltype_flag = (complain & tf_decltype);
  complain &= ~tf_decltype;

  switch (TREE_CODE (t))
    {
    case USING_DECL:
      t = DECL_NAME (t);
      /* Fall through.  */
    case IDENTIFIER_NODE:
      {
	tree decl;
	cp_id_kind idk;
	bool non_integral_constant_expression_p;
	const char *error_msg;

	if (IDENTIFIER_CONV_OP_P (t))
	  {
	    tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	    t = make_conv_op_name (new_type);
	  }

	/* Look up the name.  */
	decl = lookup_name (t);

	/* By convention, expressions use ERROR_MARK_NODE to indicate
	   failure, not NULL_TREE.
*/ if (decl == NULL_TREE) decl = error_mark_node; decl = finish_id_expression (t, decl, NULL_TREE, &idk, integral_constant_expression_p, /*allow_non_integral_constant_expression_p=*/(cxx_dialect >= cxx11), &non_integral_constant_expression_p, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, input_location); if (error_msg) error (error_msg); if (!function_p && identifier_p (decl)) { if (complain & tf_error) unqualified_name_lookup_error (decl); decl = error_mark_node; } RETURN (decl); } case TEMPLATE_ID_EXPR: { tree object; tree templ = RECUR (TREE_OPERAND (t, 0)); tree targs = TREE_OPERAND (t, 1); if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); if (targs == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (templ) == SCOPE_REF) { tree name = TREE_OPERAND (templ, 1); tree tid = lookup_template_function (name, targs); TREE_OPERAND (templ, 1) = tid; RETURN (templ); } if (variable_template_p (templ)) RETURN (lookup_and_finish_template_variable (templ, targs, complain)); if (TREE_CODE (templ) == COMPONENT_REF) { object = TREE_OPERAND (templ, 0); templ = TREE_OPERAND (templ, 1); } else object = NULL_TREE; templ = lookup_template_function (templ, targs); if (object) RETURN (build3 (COMPONENT_REF, TREE_TYPE (templ), object, templ, NULL_TREE)); else RETURN (baselink_for_fns (templ)); } case INDIRECT_REF: { tree r = RECUR (TREE_OPERAND (t, 0)); if (REFERENCE_REF_P (t)) { /* A type conversion to reference type will be enclosed in such an indirect ref, but the substitution of the cast will have also added such an indirect ref. 
*/ r = convert_from_reference (r); } else r = build_x_indirect_ref (input_location, r, RO_UNARY_STAR, complain|decltype_flag); if (REF_PARENTHESIZED_P (t)) r = force_paren_expr (r); RETURN (r); } case NOP_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = RECUR (TREE_OPERAND (t, 0)); RETURN (build_nop (type, op0)); } case IMPLICIT_CONV_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree expr = RECUR (TREE_OPERAND (t, 0)); if (dependent_type_p (type) || type_dependent_expression_p (expr)) { retval = copy_node (t); TREE_TYPE (retval) = type; TREE_OPERAND (retval, 0) = expr; RETURN (retval); } if (IMPLICIT_CONV_EXPR_NONTYPE_ARG (t)) /* We'll pass this to convert_nontype_argument again, we don't need to actually perform any conversion here. */ RETURN (expr); int flags = LOOKUP_IMPLICIT; if (IMPLICIT_CONV_EXPR_DIRECT_INIT (t)) flags = LOOKUP_NORMAL; RETURN (perform_implicit_conversion_flags (type, expr, complain, flags)); } case CONVERT_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = RECUR (TREE_OPERAND (t, 0)); if (op0 == error_mark_node) RETURN (error_mark_node); RETURN (build1 (CONVERT_EXPR, type, op0)); } case CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: { tree type; tree op, r = NULL_TREE; type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (integral_constant_expression_p && !cast_valid_in_integral_constant_expression_p (type)) { if (complain & tf_error) error ("a cast to a type other than an integral or " "enumeration type cannot appear in a constant-expression"); RETURN (error_mark_node); } op = RECUR (TREE_OPERAND (t, 0)); warning_sentinel s(warn_useless_cast); warning_sentinel s2(warn_ignored_qualifiers); switch (TREE_CODE (t)) { case CAST_EXPR: r = build_functional_cast (type, op, complain); break; case REINTERPRET_CAST_EXPR: r = build_reinterpret_cast (type, op, complain); break; case CONST_CAST_EXPR: r = 
build_const_cast (type, op, complain); break; case DYNAMIC_CAST_EXPR: r = build_dynamic_cast (type, op, complain); break; case STATIC_CAST_EXPR: r = build_static_cast (type, op, complain); break; default: gcc_unreachable (); } RETURN (r); } case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); RETURN (build_x_unary_op (input_location, TREE_CODE (t), op1, complain|decltype_flag)); case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case NEGATE_EXPR: case BIT_NOT_EXPR: case ABS_EXPR: case TRUTH_NOT_EXPR: case UNARY_PLUS_EXPR: /* Unary + */ case REALPART_EXPR: case IMAGPART_EXPR: RETURN (build_x_unary_op (input_location, TREE_CODE (t), RECUR (TREE_OPERAND (t, 0)), complain|decltype_flag)); case FIX_TRUNC_EXPR: gcc_unreachable (); case ADDR_EXPR: op1 = TREE_OPERAND (t, 0); if (TREE_CODE (op1) == LABEL_DECL) RETURN (finish_label_address_expr (DECL_NAME (op1), EXPR_LOCATION (op1))); if (TREE_CODE (op1) == SCOPE_REF) op1 = tsubst_qualified_id (op1, args, complain, in_decl, /*done=*/true, /*address_p=*/true); else op1 = tsubst_non_call_postfix_expression (op1, args, complain, in_decl); RETURN (build_x_unary_op (input_location, ADDR_EXPR, op1, complain|decltype_flag)); case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case RSHIFT_EXPR: case LSHIFT_EXPR: case RROTATE_EXPR: case LROTATE_EXPR: case EQ_EXPR: case NE_EXPR: case MAX_EXPR: case MIN_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case MEMBER_REF: case DOTSTAR_EXPR: { warning_sentinel s1(warn_type_limits); warning_sentinel s2(warn_div_by_zero); warning_sentinel s3(warn_logical_op); warning_sentinel s4(warn_tautological_compare); tree op0 = 
RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree r = build_x_binary_op (input_location, TREE_CODE (t), op0, (TREE_NO_WARNING (TREE_OPERAND (t, 0)) ? ERROR_MARK : TREE_CODE (TREE_OPERAND (t, 0))), op1, (TREE_NO_WARNING (TREE_OPERAND (t, 1)) ? ERROR_MARK : TREE_CODE (TREE_OPERAND (t, 1))), /*overload=*/NULL, complain|decltype_flag); if (EXPR_P (r) && TREE_NO_WARNING (t)) TREE_NO_WARNING (r) = TREE_NO_WARNING (t); RETURN (r); } case POINTER_PLUS_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); RETURN (fold_build_pointer_plus (op0, op1)); } case SCOPE_REF: RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true, /*address_p=*/false)); case ARRAY_REF: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); RETURN (build_x_array_ref (EXPR_LOCATION (t), op1, RECUR (TREE_OPERAND (t, 1)), complain|decltype_flag)); case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) RETURN (tsubst_copy (t, args, complain, in_decl)); /* Fall through */ case ALIGNOF_EXPR: { tree r; op1 = TREE_OPERAND (t, 0); if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t)) op1 = TREE_TYPE (op1); bool std_alignof = (TREE_CODE (t) == ALIGNOF_EXPR && ALIGNOF_EXPR_STD_P (t)); if (!args) { /* When there are no ARGS, we are trying to evaluate a non-dependent expression from the parser. Trying to do the substitutions may not work. 
*/ if (!TYPE_P (op1)) op1 = TREE_TYPE (op1); } else { ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; if (TYPE_P (op1)) op1 = tsubst (op1, args, complain, in_decl); else op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/ false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; } if (TYPE_P (op1)) r = cxx_sizeof_or_alignof_type (op1, TREE_CODE (t), std_alignof, complain & tf_error); else r = cxx_sizeof_or_alignof_expr (op1, TREE_CODE (t), complain & tf_error); if (TREE_CODE (t) == SIZEOF_EXPR && r != error_mark_node) { if (TREE_CODE (r) != SIZEOF_EXPR || TYPE_P (op1)) { if (!processing_template_decl && TYPE_P (op1)) { r = build_min (SIZEOF_EXPR, size_type_node, build1 (NOP_EXPR, op1, error_mark_node)); SIZEOF_EXPR_TYPE_P (r) = 1; } else r = build_min (SIZEOF_EXPR, size_type_node, op1); TREE_SIDE_EFFECTS (r) = 0; TREE_READONLY (r) = 1; } SET_EXPR_LOCATION (r, EXPR_LOCATION (t)); } RETURN (r); } case AT_ENCODE_EXPR: { op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; RETURN (objc_build_encode_expr (op1)); } case NOEXCEPT_EXPR: op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; ++cp_noexcept_operand; op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; --cp_noexcept_operand; RETURN (finish_noexcept_expr (op1, complain)); case MODOP_EXPR: { warning_sentinel s(warn_div_by_zero); tree lhs = RECUR (TREE_OPERAND (t, 0)); tree rhs = RECUR (TREE_OPERAND (t, 2)); tree r = build_x_modify_expr (EXPR_LOCATION (t), lhs, TREE_CODE (TREE_OPERAND (t, 1)), rhs, complain|decltype_flag); /* TREE_NO_WARNING must be set if 
either the expression was parenthesized or it uses an operator such as >>= rather than plain assignment. In the former case, it was already set and must be copied. In the latter case, build_x_modify_expr sets it and it must not be reset here. */ if (TREE_NO_WARNING (t)) TREE_NO_WARNING (r) = TREE_NO_WARNING (t); RETURN (r); } case ARROW_EXPR: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); /* Remember that there was a reference to this entity. */ if (DECL_P (op1) && !mark_used (op1, complain) && !(complain & tf_error)) RETURN (error_mark_node); RETURN (build_x_arrow (input_location, op1, complain)); case NEW_EXPR: { tree placement = RECUR (TREE_OPERAND (t, 0)); tree init = RECUR (TREE_OPERAND (t, 3)); vec<tree, va_gc> *placement_vec; vec<tree, va_gc> *init_vec; tree ret; if (placement == NULL_TREE) placement_vec = NULL; else { placement_vec = make_tree_vector (); for (; placement != NULL_TREE; placement = TREE_CHAIN (placement)) vec_safe_push (placement_vec, TREE_VALUE (placement)); } /* If there was an initializer in the original tree, but it instantiated to an empty list, then we should pass a non-NULL empty vector to tell build_new that it was an empty initializer() rather than no initializer. This can only happen when the initializer is a pack expansion whose parameter packs are of length zero. 
*/ if (init == NULL_TREE && TREE_OPERAND (t, 3) == NULL_TREE) init_vec = NULL; else { init_vec = make_tree_vector (); if (init == void_node) gcc_assert (init_vec != NULL); else { for (; init != NULL_TREE; init = TREE_CHAIN (init)) vec_safe_push (init_vec, TREE_VALUE (init)); } } tree op1 = tsubst (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = RECUR (TREE_OPERAND (t, 2)); ret = build_new (&placement_vec, op1, op2, &init_vec, NEW_EXPR_USE_GLOBAL (t), complain); if (placement_vec != NULL) release_tree_vector (placement_vec); if (init_vec != NULL) release_tree_vector (init_vec); RETURN (ret); } case DELETE_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); RETURN (delete_sanity (op0, op1, DELETE_EXPR_USE_VEC (t), DELETE_EXPR_USE_GLOBAL (t), complain)); } case COMPOUND_EXPR: { tree op0 = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, complain & ~tf_decltype, in_decl, /*function_p=*/false, integral_constant_expression_p); RETURN (build_x_compound_expr (EXPR_LOCATION (t), op0, RECUR (TREE_OPERAND (t, 1)), complain|decltype_flag)); } case CALL_EXPR: { tree function; vec<tree, va_gc> *call_args; unsigned int nargs, i; bool qualified_p; bool koenig_p; tree ret; function = CALL_EXPR_FN (t); /* Internal function with no arguments. */ if (function == NULL_TREE && call_expr_nargs (t) == 0) RETURN (t); /* When we parsed the expression, we determined whether or not Koenig lookup should be performed. */ koenig_p = KOENIG_LOOKUP_P (t); if (function == NULL_TREE) { koenig_p = false; qualified_p = false; } else if (TREE_CODE (function) == SCOPE_REF) { qualified_p = true; function = tsubst_qualified_id (function, args, complain, in_decl, /*done=*/false, /*address_p=*/false); } else if (koenig_p && identifier_p (function)) { /* Do nothing; calling tsubst_copy_and_build on an identifier would incorrectly perform unqualified lookup again. 
Note that we can also have an IDENTIFIER_NODE if the earlier unqualified lookup found a member function; in that case koenig_p will be false and we do want to do the lookup again to find the instantiated member function. FIXME but doing that causes c++/15272, so we need to stop using IDENTIFIER_NODE in that situation. */ qualified_p = false; } else { if (TREE_CODE (function) == COMPONENT_REF) { tree op = TREE_OPERAND (function, 1); qualified_p = (TREE_CODE (op) == SCOPE_REF || (BASELINK_P (op) && BASELINK_QUALIFIED_P (op))); } else qualified_p = false; if (TREE_CODE (function) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL) /* Avoid error about taking the address of a constructor. */ function = TREE_OPERAND (function, 0); function = tsubst_copy_and_build (function, args, complain, in_decl, !qualified_p, integral_constant_expression_p); if (BASELINK_P (function)) qualified_p = true; } nargs = call_expr_nargs (t); call_args = make_tree_vector (); for (i = 0; i < nargs; ++i) { tree arg = CALL_EXPR_ARG (t, i); if (!PACK_EXPANSION_P (arg)) vec_safe_push (call_args, RECUR (CALL_EXPR_ARG (t, i))); else { /* Expand the pack expansion and push each entry onto CALL_ARGS. */ arg = tsubst_pack_expansion (arg, args, complain, in_decl); if (TREE_CODE (arg) == TREE_VEC) { unsigned int len, j; len = TREE_VEC_LENGTH (arg); for (j = 0; j < len; ++j) { tree value = TREE_VEC_ELT (arg, j); if (value != NULL_TREE) value = convert_from_reference (value); vec_safe_push (call_args, value); } } else { /* A partial substitution. Add one entry. */ vec_safe_push (call_args, arg); } } } /* We do not perform argument-dependent lookup if normal lookup finds a non-function, in accordance with the expected resolution of DR 218. */ if (koenig_p && ((is_overloaded_fn (function) /* If lookup found a member function, the Koenig lookup is not appropriate, even if an unqualified-name was used to denote the function. 
*/ && !DECL_FUNCTION_MEMBER_P (get_first_fn (function))) || identifier_p (function)) /* Only do this when substitution turns a dependent call into a non-dependent call. */ && type_dependent_expression_p_push (t) && !any_type_dependent_arguments_p (call_args)) function = perform_koenig_lookup (function, call_args, tf_none); if (function != NULL_TREE && identifier_p (function) && !any_type_dependent_arguments_p (call_args)) { if (koenig_p && (complain & tf_warning_or_error)) { /* For backwards compatibility and good diagnostics, try the unqualified lookup again if we aren't in SFINAE context. */ tree unq = (tsubst_copy_and_build (function, args, complain, in_decl, true, integral_constant_expression_p)); if (unq == error_mark_node) { release_tree_vector (call_args); RETURN (error_mark_node); } if (unq != function) { /* In a lambda fn, we have to be careful to not introduce new this captures. Legacy code can't be using lambdas anyway, so it's ok to be stricter. */ bool in_lambda = (current_class_type && LAMBDA_TYPE_P (current_class_type)); char const *const msg = G_("%qD was not declared in this scope, " "and no declarations were found by " "argument-dependent lookup at the point " "of instantiation"); bool diag = true; if (in_lambda) error_at (EXPR_LOC_OR_LOC (t, input_location), msg, function); else diag = permerror (EXPR_LOC_OR_LOC (t, input_location), msg, function); if (diag) { tree fn = unq; if (INDIRECT_REF_P (fn)) fn = TREE_OPERAND (fn, 0); if (is_overloaded_fn (fn)) fn = get_first_fn (fn); if (!DECL_P (fn)) /* Can't say anything more. 
*/; else if (DECL_CLASS_SCOPE_P (fn)) { location_t loc = EXPR_LOC_OR_LOC (t, input_location); inform (loc, "declarations in dependent base %qT are " "not found by unqualified lookup", DECL_CLASS_CONTEXT (fn)); if (current_class_ptr) inform (loc, "use %<this->%D%> instead", function); else inform (loc, "use %<%T::%D%> instead", current_class_name, function); } else inform (DECL_SOURCE_LOCATION (fn), "%qD declared here, later in the " "translation unit", fn); if (in_lambda) { release_tree_vector (call_args); RETURN (error_mark_node); } } function = unq; } } if (identifier_p (function)) { if (complain & tf_error) unqualified_name_lookup_error (function); release_tree_vector (call_args); RETURN (error_mark_node); } } /* Remember that there was a reference to this entity. */ if (function != NULL_TREE && DECL_P (function) && !mark_used (function, complain) && !(complain & tf_error)) { release_tree_vector (call_args); RETURN (error_mark_node); } /* Put back tf_decltype for the actual call. */ complain |= decltype_flag; if (function == NULL_TREE) switch (CALL_EXPR_IFN (t)) { case IFN_LAUNDER: gcc_assert (nargs == 1); if (vec_safe_length (call_args) != 1) { error_at (EXPR_LOC_OR_LOC (t, input_location), "wrong number of arguments to " "%<__builtin_launder%>"); ret = error_mark_node; } else ret = finish_builtin_launder (EXPR_LOC_OR_LOC (t, input_location), (*call_args)[0], complain); break; default: /* Unsupported internal function with arguments. 
*/ gcc_unreachable (); } else if (TREE_CODE (function) == OFFSET_REF) ret = build_offset_ref_call_from_tree (function, &call_args, complain); else if (TREE_CODE (function) == COMPONENT_REF) { tree instance = TREE_OPERAND (function, 0); tree fn = TREE_OPERAND (function, 1); if (processing_template_decl && (type_dependent_expression_p (instance) || (!BASELINK_P (fn) && TREE_CODE (fn) != FIELD_DECL) || type_dependent_expression_p (fn) || any_type_dependent_arguments_p (call_args))) ret = build_min_nt_call_vec (function, call_args); else if (!BASELINK_P (fn)) ret = finish_call_expr (function, &call_args, /*disallow_virtual=*/false, /*koenig_p=*/false, complain); else ret = (build_new_method_call (instance, fn, &call_args, NULL_TREE, qualified_p ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL, /*fn_p=*/NULL, complain)); } else ret = finish_call_expr (function, &call_args, /*disallow_virtual=*/qualified_p, koenig_p, complain); release_tree_vector (call_args); if (ret != error_mark_node) { bool op = CALL_EXPR_OPERATOR_SYNTAX (t); bool ord = CALL_EXPR_ORDERED_ARGS (t); bool rev = CALL_EXPR_REVERSE_ARGS (t); bool thk = CALL_FROM_THUNK_P (t); if (op || ord || rev || thk) { function = extract_call_expr (ret); CALL_EXPR_OPERATOR_SYNTAX (function) = op; CALL_EXPR_ORDERED_ARGS (function) = ord; CALL_EXPR_REVERSE_ARGS (function) = rev; if (thk) { if (TREE_CODE (function) == CALL_EXPR) CALL_FROM_THUNK_P (function) = true; else AGGR_INIT_FROM_THUNK_P (function) = true; /* The thunk location is not interesting. 
*/ SET_EXPR_LOCATION (function, UNKNOWN_LOCATION); } } } RETURN (ret); } case COND_EXPR: { tree cond = RECUR (TREE_OPERAND (t, 0)); tree folded_cond = fold_non_dependent_expr (cond); tree exp1, exp2; if (TREE_CODE (folded_cond) == INTEGER_CST) { if (integer_zerop (folded_cond)) { ++c_inhibit_evaluation_warnings; exp1 = RECUR (TREE_OPERAND (t, 1)); --c_inhibit_evaluation_warnings; exp2 = RECUR (TREE_OPERAND (t, 2)); } else { exp1 = RECUR (TREE_OPERAND (t, 1)); ++c_inhibit_evaluation_warnings; exp2 = RECUR (TREE_OPERAND (t, 2)); --c_inhibit_evaluation_warnings; } cond = folded_cond; } else { exp1 = RECUR (TREE_OPERAND (t, 1)); exp2 = RECUR (TREE_OPERAND (t, 2)); } warning_sentinel s(warn_duplicated_branches); RETURN (build_x_conditional_expr (EXPR_LOCATION (t), cond, exp1, exp2, complain)); } case PSEUDO_DTOR_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree op2 = tsubst (TREE_OPERAND (t, 2), args, complain, in_decl); RETURN (finish_pseudo_destructor_expr (op0, op1, op2, input_location)); } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) RETURN (t); if ((TREE_PURPOSE (t) && PACK_EXPANSION_P (TREE_PURPOSE (t))) || (TREE_VALUE (t) && PACK_EXPANSION_P (TREE_VALUE (t)))) { /* We have pack expansions, so expand those and create a new list out of it. */ tree purposevec = NULL_TREE; tree valuevec = NULL_TREE; tree chain; int i, len = -1; /* Expand the argument expressions. */ if (TREE_PURPOSE (t)) purposevec = tsubst_pack_expansion (TREE_PURPOSE (t), args, complain, in_decl); if (TREE_VALUE (t)) valuevec = tsubst_pack_expansion (TREE_VALUE (t), args, complain, in_decl); /* Build the rest of the list. */ chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = RECUR (chain); /* Determine the number of arguments. 
*/ if (purposevec && TREE_CODE (purposevec) == TREE_VEC) { len = TREE_VEC_LENGTH (purposevec); gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec)); } else if (TREE_CODE (valuevec) == TREE_VEC) len = TREE_VEC_LENGTH (valuevec); else { /* Since we only performed a partial substitution into the argument pack, we only RETURN (a single list node. */ if (purposevec == TREE_PURPOSE (t) && valuevec == TREE_VALUE (t) && chain == TREE_CHAIN (t)) RETURN (t); RETURN (tree_cons (purposevec, valuevec, chain)); } /* Convert the argument vectors into a TREE_LIST */ i = len; while (i > 0) { /* Grab the Ith values. */ i--; purpose = purposevec ? TREE_VEC_ELT (purposevec, i) : NULL_TREE; value = valuevec ? convert_from_reference (TREE_VEC_ELT (valuevec, i)) : NULL_TREE; /* Build the list (backwards). */ chain = tree_cons (purpose, value, chain); } RETURN (chain); } purpose = TREE_PURPOSE (t); if (purpose) purpose = RECUR (purpose); value = TREE_VALUE (t); if (value) value = RECUR (value); chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = RECUR (chain); if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) RETURN (t); RETURN (tree_cons (purpose, value, chain)); } case COMPONENT_REF: { tree object; tree object_type; tree member; tree r; object = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); /* Remember that there was a reference to this entity. 
*/ if (DECL_P (object) && !mark_used (object, complain) && !(complain & tf_error)) RETURN (error_mark_node); object_type = TREE_TYPE (object); member = TREE_OPERAND (t, 1); if (BASELINK_P (member)) member = tsubst_baselink (member, non_reference (TREE_TYPE (object)), args, complain, in_decl); else member = tsubst_copy (member, args, complain, in_decl); if (member == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (member) == FIELD_DECL) { r = finish_non_static_data_member (member, object, NULL_TREE); if (TREE_CODE (r) == COMPONENT_REF) REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t); RETURN (r); } else if (type_dependent_expression_p (object)) /* We can't do much here. */; else if (!CLASS_TYPE_P (object_type)) { if (scalarish_type_p (object_type)) { tree s = NULL_TREE; tree dtor = member; if (TREE_CODE (dtor) == SCOPE_REF) { s = TREE_OPERAND (dtor, 0); dtor = TREE_OPERAND (dtor, 1); } if (TREE_CODE (dtor) == BIT_NOT_EXPR) { dtor = TREE_OPERAND (dtor, 0); if (TYPE_P (dtor)) RETURN (finish_pseudo_destructor_expr (object, s, dtor, input_location)); } } } else if (TREE_CODE (member) == SCOPE_REF && TREE_CODE (TREE_OPERAND (member, 1)) == TEMPLATE_ID_EXPR) { /* Lookup the template functions now that we know what the scope is. 
*/ tree scope = TREE_OPERAND (member, 0); tree tmpl = TREE_OPERAND (TREE_OPERAND (member, 1), 0); tree args = TREE_OPERAND (TREE_OPERAND (member, 1), 1); member = lookup_qualified_name (scope, tmpl, /*is_type_p=*/false, /*complain=*/false); if (BASELINK_P (member)) { BASELINK_FUNCTIONS (member) = build_nt (TEMPLATE_ID_EXPR, BASELINK_FUNCTIONS (member), args); member = (adjust_result_of_qualified_name_lookup (member, BINFO_TYPE (BASELINK_BINFO (member)), object_type)); } else { qualified_name_lookup_error (scope, tmpl, member, input_location); RETURN (error_mark_node); } } else if (TREE_CODE (member) == SCOPE_REF && !CLASS_TYPE_P (TREE_OPERAND (member, 0)) && TREE_CODE (TREE_OPERAND (member, 0)) != NAMESPACE_DECL) { if (complain & tf_error) { if (TYPE_P (TREE_OPERAND (member, 0))) error ("%qT is not a class or namespace", TREE_OPERAND (member, 0)); else error ("%qD is not a class or namespace", TREE_OPERAND (member, 0)); } RETURN (error_mark_node); } r = finish_class_member_access_expr (object, member, /*template_p=*/false, complain); if (TREE_CODE (r) == COMPONENT_REF) REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t); RETURN (r); } case THROW_EXPR: RETURN (build_throw (RECUR (TREE_OPERAND (t, 0)))); case CONSTRUCTOR: { vec<constructor_elt, va_gc> *n; constructor_elt *ce; unsigned HOST_WIDE_INT idx; tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); bool process_index_p; int newlen; bool need_copy_p = false; tree r; if (type == error_mark_node) RETURN (error_mark_node); /* We do not want to process the index of aggregate initializers as they are identifier nodes which will be looked up by digest_init. */ process_index_p = !(type && MAYBE_CLASS_TYPE_P (type)); n = vec_safe_copy (CONSTRUCTOR_ELTS (t)); newlen = vec_safe_length (n); FOR_EACH_VEC_SAFE_ELT (n, idx, ce) { if (ce->index && process_index_p /* An identifier index is looked up in the type being initialized, not the current scope. 
*/ && TREE_CODE (ce->index) != IDENTIFIER_NODE) ce->index = RECUR (ce->index); if (PACK_EXPANSION_P (ce->value)) { /* Substitute into the pack expansion. */ ce->value = tsubst_pack_expansion (ce->value, args, complain, in_decl); if (ce->value == error_mark_node || PACK_EXPANSION_P (ce->value)) ; else if (TREE_VEC_LENGTH (ce->value) == 1) /* Just move the argument into place. */ ce->value = TREE_VEC_ELT (ce->value, 0); else { /* Update the length of the final CONSTRUCTOR arguments vector, and note that we will need to copy.*/ newlen = newlen + TREE_VEC_LENGTH (ce->value) - 1; need_copy_p = true; } } else ce->value = RECUR (ce->value); } if (need_copy_p) { vec<constructor_elt, va_gc> *old_n = n; vec_alloc (n, newlen); FOR_EACH_VEC_ELT (*old_n, idx, ce) { if (TREE_CODE (ce->value) == TREE_VEC) { int i, len = TREE_VEC_LENGTH (ce->value); for (i = 0; i < len; ++i) CONSTRUCTOR_APPEND_ELT (n, 0, TREE_VEC_ELT (ce->value, i)); } else CONSTRUCTOR_APPEND_ELT (n, 0, ce->value); } } r = build_constructor (init_list_type_node, n); CONSTRUCTOR_IS_DIRECT_INIT (r) = CONSTRUCTOR_IS_DIRECT_INIT (t); if (TREE_HAS_CONSTRUCTOR (t)) { fcl_t cl = fcl_functional; if (CONSTRUCTOR_C99_COMPOUND_LITERAL (t)) cl = fcl_c99; RETURN (finish_compound_literal (type, r, complain, cl)); } TREE_TYPE (r) = type; RETURN (r); } case TYPEID_EXPR: { tree operand_0 = TREE_OPERAND (t, 0); if (TYPE_P (operand_0)) { operand_0 = tsubst (operand_0, args, complain, in_decl); RETURN (get_typeid (operand_0, complain)); } else { operand_0 = RECUR (operand_0); RETURN (build_typeid (operand_0, complain)); } } case VAR_DECL: if (!args) RETURN (t); /* Fall through */ case PARM_DECL: { tree r = tsubst_copy (t, args, complain, in_decl); /* ??? We're doing a subset of finish_id_expression here. 
*/ if (VAR_P (r) && !processing_template_decl && !cp_unevaluated_operand && (TREE_STATIC (r) || DECL_EXTERNAL (r)) && CP_DECL_THREAD_LOCAL_P (r)) { if (tree wrap = get_tls_wrapper_fn (r)) /* Replace an evaluated use of the thread_local variable with a call to its wrapper. */ r = build_cxx_call (wrap, 0, NULL, tf_warning_or_error); } else if (outer_automatic_var_p (r)) r = process_outer_var_ref (r, complain); if (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE) /* If the original type was a reference, we'll be wrapped in the appropriate INDIRECT_REF. */ r = convert_from_reference (r); RETURN (r); } case VA_ARG_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); RETURN (build_x_va_arg (EXPR_LOCATION (t), op0, type)); } case OFFSETOF_EXPR: { tree object_ptr = tsubst_copy_and_build (TREE_OPERAND (t, 1), args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); RETURN (finish_offsetof (object_ptr, RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); } case ADDRESSOF_EXPR: RETURN (cp_build_addressof (EXPR_LOCATION (t), RECUR (TREE_OPERAND (t, 0)), complain)); case TRAIT_EXPR: { tree type1 = tsubst (TRAIT_EXPR_TYPE1 (t), args, complain, in_decl); tree type2 = TRAIT_EXPR_TYPE2 (t); if (type2 && TREE_CODE (type2) == TREE_LIST) type2 = RECUR (type2); else if (type2) type2 = tsubst (type2, args, complain, in_decl); RETURN (finish_trait_expr (TRAIT_EXPR_KIND (t), type1, type2)); } case STMT_EXPR: { tree old_stmt_expr = cur_stmt_expr; tree stmt_expr = begin_stmt_expr (); cur_stmt_expr = stmt_expr; tsubst_expr (STMT_EXPR_STMT (t), args, complain, in_decl, integral_constant_expression_p); stmt_expr = finish_stmt_expr (stmt_expr, false); cur_stmt_expr = old_stmt_expr; /* If the resulting list of expression statement is empty, fold it further into void_node. 
*/ if (empty_expr_stmt_p (stmt_expr)) stmt_expr = void_node; RETURN (stmt_expr); } case LAMBDA_EXPR: { tree r = tsubst_lambda_expr (t, args, complain, in_decl); RETURN (build_lambda_object (r)); } case TARGET_EXPR: /* We can get here for a constant initializer of non-dependent type. FIXME stop folding in cp_parser_initializer_clause. */ { tree r = get_target_expr_sfinae (RECUR (TARGET_EXPR_INITIAL (t)), complain); RETURN (r); } case TRANSACTION_EXPR: RETURN (tsubst_expr(t, args, complain, in_decl, integral_constant_expression_p)); case PAREN_EXPR: RETURN (finish_parenthesized_expr (RECUR (TREE_OPERAND (t, 0)))); case VEC_PERM_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree op2 = RECUR (TREE_OPERAND (t, 2)); RETURN (build_x_vec_perm_expr (input_location, op0, op1, op2, complain)); } case REQUIRES_EXPR: RETURN (tsubst_requires_expr (t, args, complain, in_decl)); case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: /* We should only see these for location wrapper nodes, or within instantiate_non_dependent_expr (when args is NULL_TREE). */ gcc_assert (location_wrapper_p (t) || args == NULL_TREE); if (location_wrapper_p (t)) RETURN (maybe_wrap_with_location (RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); /* fallthrough. */ default: /* Handle Objective-C++ constructs, if appropriate. */ { tree subst = objcp_tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false); if (subst) RETURN (subst); } RETURN (tsubst_copy (t, args, complain, in_decl)); } #undef RECUR #undef RETURN out: input_location = loc; return retval; } /* Verify that the instantiated ARGS are valid. For type arguments, make sure that the type's linkage is ok. For non-type arguments, make sure they are constants if they are integral or enumerations. Emit an error under control of COMPLAIN, and return TRUE on error. 
static bool
check_instantiated_arg (tree tmpl, tree t, tsubst_flags_t complain)
{
  /* Dependent arguments can't be validated yet; they are checked again
     when the enclosing template is instantiated.  */
  if (dependent_template_arg_p (t))
    return false;
  if (ARGUMENT_PACK_P (t))
    {
      /* Check every element of the pack, accumulating rather than
         stopping at the first bad one so all errors get reported.  */
      tree vec = ARGUMENT_PACK_ARGS (t);
      int len = TREE_VEC_LENGTH (vec);
      bool result = false;
      int i;

      for (i = 0; i < len; ++i)
        if (check_instantiated_arg (tmpl, TREE_VEC_ELT (vec, i), complain))
          result = true;
      return result;
    }
  else if (TYPE_P (t))
    {
      /* [basic.link]: A name with no linkage (notably, the name of a class
         or enumeration declared in a local scope) shall not be used to
         declare an entity with linkage.

         This implies that names with no linkage cannot be used as
         template arguments

         DR 757 relaxes this restriction for C++0x.  */
      tree nt = (cxx_dialect > cxx98 ? NULL_TREE
                 : no_linkage_check (t, /*relaxed_p=*/false));

      if (nt)
        {
          /* DR 488 makes use of a type with no linkage cause
             type deduction to fail.  */
          if (complain & tf_error)
            {
              if (TYPE_UNNAMED_P (nt))
                error ("%qT is/uses unnamed type", t);
              else
                error ("template argument for %qD uses local type %qT",
                       tmpl, t);
            }
          return true;
        }
      /* In order to avoid all sorts of complications, we do not allow
         variably-modified types as template arguments.  */
      else if (variably_modified_type_p (t, NULL_TREE))
        {
          if (complain & tf_error)
            error ("%qT is a variably modified type", t);
          return true;
        }
    }
  /* Class template and alias template arguments should be OK.  */
  else if (DECL_TYPE_TEMPLATE_P (t))
    ;
  /* A non-type argument of integral or enumerated type must be a
     constant.  */
  else if (TREE_TYPE (t)
           && INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (t))
           && !REFERENCE_REF_P (t)
           && !TREE_CONSTANT (t))
    {
      if (complain & tf_error)
        error ("integral expression %qE is not constant", t);
      return true;
    }
  return false;
}

/* Apply check_instantiated_arg to every element of the argument vector
   ARGS for TMPL.  Returns true if any argument was invalid; diagnostics
   are emitted under control of COMPLAIN.  */

static bool
check_instantiated_args (tree tmpl, tree args, tsubst_flags_t complain)
{
  int ix, len = DECL_NTPARMS (tmpl);
  bool result = false;

  for (ix = 0; ix != len; ix++)
    {
      if (check_instantiated_arg (tmpl, TREE_VEC_ELT (args, ix), complain))
        result = true;
    }
  if (result && (complain & tf_error))
    /* Leading space: this line is a continuation of the per-argument
       error messages emitted above.  */
    error (" trying to instantiate %qD", tmpl);
  return result;
}

/* We're out of SFINAE context now, so generate diagnostics for the access
   errors we saw earlier when instantiating D from TMPL and ARGS.  */

static void
recheck_decl_substitution (tree d, tree tmpl, tree args)
{
  tree pattern = DECL_TEMPLATE_RESULT (tmpl);
  tree type = TREE_TYPE (pattern);
  location_t loc = input_location;

  push_access_scope (d);
  push_deferring_access_checks (dk_no_deferred);
  /* Point diagnostics at the template pattern, not at wherever we
     happen to be in the caller.  */
  input_location = DECL_SOURCE_LOCATION (pattern);
  /* Redo the substitution, this time with errors enabled; the result
     is discarded — only the diagnostics matter.  */
  tsubst (type, args, tf_warning_or_error, d);
  input_location = loc;
  pop_deferring_access_checks ();
  pop_access_scope (d);
}

/* Instantiate the indicated variable, function, or alias template TMPL
   with the template arguments in TARG_PTR.  */

static tree
instantiate_template_1 (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  tree targ_ptr = orig_args;
  tree fndecl;
  tree gen_tmpl;
  tree spec;
  bool access_ok = true;

  if (tmpl == error_mark_node)
    return error_mark_node;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);

  /* If this function is a clone, handle it specially.  */
  if (DECL_CLONED_FUNCTION_P (tmpl))
    {
      tree spec;
      tree clone;

      /* Use DECL_ABSTRACT_ORIGIN because only FUNCTION_DECLs have
         DECL_CLONED_FUNCTION.  */
      spec = instantiate_template (DECL_ABSTRACT_ORIGIN (tmpl),
                                   targ_ptr, complain);
      if (spec == error_mark_node)
        return error_mark_node;

      /* Look for the clone.  */
      FOR_EACH_CLONE (clone, spec)
        if (DECL_NAME (clone) == DECL_NAME (tmpl))
          return clone;
      /* We should always have found the clone by now.  */
      gcc_unreachable ();
      return NULL_TREE;
    }

  if (targ_ptr == error_mark_node)
    return error_mark_node;

  /* Check to see if we already have this specialization.  */
  gen_tmpl = most_general_template (tmpl);
  if (TMPL_ARGS_DEPTH (targ_ptr)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)))
    /* targ_ptr only has the innermost template args, so add the outer ones
       from tmpl, which could be either a partial instantiation or gen_tmpl
       (in the case of a non-dependent call within a template definition).  */
    targ_ptr = (add_outermost_template_args
                (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl)),
                 targ_ptr));

  /* It would be nice to avoid hashing here and then again in tsubst_decl,
     but it doesn't seem to be on the hot path.  */
  spec = retrieve_specialization (gen_tmpl, targ_ptr, 0);

  gcc_assert (tmpl == gen_tmpl
              || ((fndecl = retrieve_specialization (tmpl, orig_args, 0))
                  == spec)
              || fndecl == NULL_TREE);

  if (spec != NULL_TREE)
    {
      /* A cached specialization that previously failed its deferred
         access checks is an error; rerun the substitution with errors
         enabled so the user sees them (outside SFINAE only).  */
      if (FNDECL_HAS_ACCESS_ERRORS (spec))
        {
          if (complain & tf_error)
            recheck_decl_substitution (spec, gen_tmpl, targ_ptr);
          return error_mark_node;
        }
      return spec;
    }

  if (check_instantiated_args (gen_tmpl, INNERMOST_TEMPLATE_ARGS (targ_ptr),
                               complain))
    return error_mark_node;

  /* We are building a FUNCTION_DECL, during which the access of its
     parameters and return types have to be checked.  However this
     FUNCTION_DECL which is the desired context for access checking
     is not built yet.  We solve this chicken-and-egg problem by
     deferring all checks until we have the FUNCTION_DECL.  */
  push_deferring_access_checks (dk_deferred);

  /* Instantiation of the function happens in the context of the function
     template, not the context of the overload resolution we're doing.  */
  push_to_top_level ();
  /* If there are dependent arguments, e.g. because we're doing partial
     ordering, make sure processing_template_decl stays set.  */
  if (uses_template_parms (targ_ptr))
    ++processing_template_decl;
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    {
      tree ctx = tsubst_aggr_type (DECL_CONTEXT (gen_tmpl), targ_ptr,
                                   complain, gen_tmpl, true);
      push_nested_class (ctx);
    }

  tree pattern = DECL_TEMPLATE_RESULT (gen_tmpl);

  fndecl = NULL_TREE;
  if (VAR_P (pattern))
    {
      /* We need to determine if we're using a partial or explicit
         specialization now, because the type of the variable could be
         different.  */
      tree tid = lookup_template_variable (gen_tmpl, targ_ptr);
      tree elt = most_specialized_partial_spec (tid, complain);
      if (elt == error_mark_node)
        pattern = error_mark_node;
      else if (elt)
        {
          tree partial_tmpl = TREE_VALUE (elt);
          tree partial_args = TREE_PURPOSE (elt);
          tree partial_pat = DECL_TEMPLATE_RESULT (partial_tmpl);
          fndecl = tsubst (partial_pat, partial_args, complain, gen_tmpl);
        }
    }

  /* Substitute template parameters to obtain the specialization.  */
  if (fndecl == NULL_TREE)
    fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl);
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    pop_nested_class ();
  pop_from_top_level ();

  if (fndecl == error_mark_node)
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* The DECL_TI_TEMPLATE should always be the immediate parent
     template, not the most general template.  */
  DECL_TI_TEMPLATE (fndecl) = tmpl;
  DECL_TI_ARGS (fndecl) = targ_ptr;

  /* Now we know the specialization, compute access previously
     deferred.  Do no access control for inheriting constructors,
     as we already checked access for the inherited constructor.  */
  if (!(flag_new_inheriting_ctors
        && DECL_INHERITED_CTOR (fndecl)))
    {
      push_access_scope (fndecl);
      if (!perform_deferred_access_checks (complain))
        access_ok = false;
      pop_access_scope (fndecl);
    }
  pop_deferring_access_checks ();

  /* If we've just instantiated the main entry point for a function,
     instantiate all the alternate entry points as well.  We do this
     by cloning the instantiation of the main entry point, not by
     instantiating the template clones.  */
  if (DECL_CHAIN (gen_tmpl) && DECL_CLONED_FUNCTION_P (DECL_CHAIN (gen_tmpl)))
    clone_function_decl (fndecl, /*update_methods=*/false);

  if (!access_ok)
    {
      if (!(complain & tf_error))
        {
          /* Remember to reinstantiate when we're out of SFINAE so the user
             can see the errors.  */
          FNDECL_HAS_ACCESS_ERRORS (fndecl) = true;
        }
      return error_mark_node;
    }
  return fndecl;
}

/* Wrapper for instantiate_template_1: same contract, but accounts the
   time under TV_TEMPLATE_INST.  */

tree
instantiate_template (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  tree ret;
  timevar_push (TV_TEMPLATE_INST);
  ret = instantiate_template_1 (tmpl, orig_args, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return ret;
}

/* Instantiate the alias template TMPL with ARGS.  Also push a template
   instantiation level, which instantiate_template doesn't do because
   functions and variables have sufficient context established by the
   callers.  */

static tree
instantiate_alias_template (tree tmpl, tree args, tsubst_flags_t complain)
{
  if (tmpl == error_mark_node || args == error_mark_node)
    return error_mark_node;
  if (!push_tinst_level (tmpl, args))
    return error_mark_node;

  args = coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (tmpl),
                                          args, tmpl, complain,
                                          /*require_all_args=*/true,
                                          /*use_default_args=*/true);

  tree r = instantiate_template (tmpl, args, complain);
  pop_tinst_level ();
  return r;
}

/* PARM is a template parameter pack for FN.  Returns true iff
   PARM is used in a deducible way in the argument list of FN.  */

static bool
pack_deducible_p (tree parm, tree fn)
{
  tree t = FUNCTION_FIRST_USER_PARMTYPE (fn);
  for (; t; t = TREE_CHAIN (t))
    {
      tree type = TREE_VALUE (t);
      tree packs;
      if (!PACK_EXPANSION_P (type))
        continue;
      for (packs = PACK_EXPANSION_PARAMETER_PACKS (type);
           packs; packs = TREE_CHAIN (packs))
        if (template_args_equal (TREE_VALUE (packs), parm))
          {
            /* The template parameter pack is used in a function parameter
               pack.  If this is the end of the parameter list, the
               template parameter pack is deducible.  */
            if (TREE_CHAIN (t) == void_list_node)
              return true;
            else
              /* Otherwise, not.  Well, it could be deduced from
                 a non-pack parameter, but doing so would end up with
                 a deduction mismatch, so don't bother.  */
              return false;
          }
    }
  /* The template parameter pack isn't used in any function parameter
     packs, but it might be used deeper, e.g. tuple<Args...>.  */
  return true;
}

/* The FN is a TEMPLATE_DECL for a function.  ARGS is an array with
   NARGS elements of the arguments that are being used when calling
   it.  TARGS is a vector into which the deduced template arguments
   are placed.

   Returns either a FUNCTION_DECL for the matching specialization of FN
   or NULL_TREE if no suitable specialization can be found.  If
   EXPLAIN_P is true, diagnostics will be printed to explain why it
   failed.

   If FN is a conversion operator, or we are trying to produce a
   specific specialization, RETURN_TYPE is the return type desired.

   The EXPLICIT_TARGS are explicit template arguments provided via a
   template-id.

   The parameter STRICT is one of:

   DEDUCE_CALL:
     We are deducing arguments for a function call, as in
     [temp.deduct.call].  If RETURN_TYPE is non-null, we are
     deducing arguments for a call to the result of a conversion
     function template, as in [over.call.object].

   DEDUCE_CONV:
     We are deducing arguments for a conversion function, as in
     [temp.deduct.conv].

   DEDUCE_EXACT:
     We are deducing arguments when doing an explicit instantiation
     as in [temp.explicit], when determining an explicit specialization
     as in [temp.expl.spec], or when taking the address of a function
     template, as in [temp.deduct.funcaddr].  */

tree
fn_type_unification (tree fn,
                     tree explicit_targs,
                     tree targs,
                     const tree *args,
                     unsigned int nargs,
                     tree return_type,
                     unification_kind_t strict,
                     int flags,
                     bool explain_p,
                     bool decltype_p)
{
  tree parms;
  tree fntype;
  tree decl = NULL_TREE;
  /* Only emit diagnostics when the caller asked for an explanation;
     otherwise stay silent for SFINAE.  */
  tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none);
  bool ok;
  static int deduction_depth;

  tree orig_fn = fn;
  if (flag_new_inheriting_ctors)
    fn = strip_inheriting_ctors (fn);

  tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn);
  tree r = error_mark_node;

  tree full_targs = targs;
  if (TMPL_ARGS_DEPTH (targs)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (fn)))
    full_targs = (add_outermost_template_args
                  (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (fn)),
                   targs));

  if (decltype_p)
    complain |= tf_decltype;

  /* In C++0x, it's possible to have a function template whose type depends
     on itself recursively.  This is most obvious with decltype, but can also
     occur with enumeration scope (c++/48969).  So we need to catch infinite
     recursion and reject the substitution at deduction time; this function
     will return error_mark_node for any repeated substitution.

     This also catches excessive recursion such as when f<N> depends on
     f<N-1> across all integers, and returns error_mark_node for all the
     substitutions back up to the initial one.

     This is, of course, not reentrant.  */
  if (excessive_deduction_depth)
    return error_mark_node;
  ++deduction_depth;

  gcc_assert (TREE_CODE (fn) == TEMPLATE_DECL);

  fntype = TREE_TYPE (fn);
  if (explicit_targs)
    {
      /* [temp.deduct]

         The specified template arguments must match the template
         parameters in kind (i.e., type, nontype, template), and there
         must not be more arguments than there are parameters;
         otherwise type deduction fails.

         Nontype arguments must match the types of the corresponding
         nontype template parameters, or must be convertible to the
         types of the corresponding nontype parameters as specified in
         _temp.arg.nontype_, otherwise type deduction fails.

         All references in the function type of the function template
         to the corresponding template parameters are replaced by the
         specified template argument values.  If a substitution in a
         template parameter or in the function type of the function
         template results in an invalid type, type deduction fails.  */
      int i, len = TREE_VEC_LENGTH (tparms);
      location_t loc = input_location;
      bool incomplete = false;

      if (explicit_targs == error_mark_node)
        goto fail;

      if (TMPL_ARGS_DEPTH (explicit_targs)
          < TMPL_ARGS_DEPTH (full_targs))
        explicit_targs = add_outermost_template_args (full_targs,
                                                      explicit_targs);

      /* Adjust any explicit template arguments before entering the
         substitution context.  */
      explicit_targs
        = (coerce_template_parms (tparms, explicit_targs, NULL_TREE,
                                  complain,
                                  /*require_all_args=*/false,
                                  /*use_default_args=*/false));
      if (explicit_targs == error_mark_node)
        goto fail;

      /* Substitute the explicit args into the function type.  This is
         necessary so that, for instance, explicitly declared function
         arguments can match null pointer constants.  If we were given an
         incomplete set of explicit args, we must not do semantic
         processing during substitution as we could create partial
         instantiations.  */
      for (i = 0; i < len; i++)
        {
          tree parm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
          bool parameter_pack = false;
          tree targ = TREE_VEC_ELT (explicit_targs, i);

          /* Dig out the actual parm.  */
          if (TREE_CODE (parm) == TYPE_DECL
              || TREE_CODE (parm) == TEMPLATE_DECL)
            {
              parm = TREE_TYPE (parm);
              parameter_pack = TEMPLATE_TYPE_PARAMETER_PACK (parm);
            }
          else if (TREE_CODE (parm) == PARM_DECL)
            {
              parm = DECL_INITIAL (parm);
              parameter_pack = TEMPLATE_PARM_PARAMETER_PACK (parm);
            }

          if (!parameter_pack && targ == NULL_TREE)
            /* No explicit argument for this template parameter.  */
            incomplete = true;

          if (parameter_pack && pack_deducible_p (parm, fn))
            {
              /* Mark the argument pack as "incomplete".  We could
                 still deduce more arguments during unification.
                 We remove this mark in type_unification_real.  */
              if (targ)
                {
                  ARGUMENT_PACK_INCOMPLETE_P(targ) = 1;
                  ARGUMENT_PACK_EXPLICIT_ARGS (targ)
                    = ARGUMENT_PACK_ARGS (targ);
                }

              /* We have some incomplete argument packs.  */
              incomplete = true;
            }
        }

      if (!push_tinst_level (fn, explicit_targs))
        {
          excessive_deduction_depth = true;
          goto fail;
        }
      processing_template_decl += incomplete;
      input_location = DECL_SOURCE_LOCATION (fn);
      /* Ignore any access checks; we'll see them again in
         instantiate_template and they might have the wrong
         access path at this point.  */
      push_deferring_access_checks (dk_deferred);
      fntype = tsubst (TREE_TYPE (fn), explicit_targs,
                       complain | tf_partial | tf_fndecl_type, NULL_TREE);
      pop_deferring_access_checks ();
      input_location = loc;
      processing_template_decl -= incomplete;
      pop_tinst_level ();

      if (fntype == error_mark_node)
        goto fail;

      /* Place the explicitly specified arguments in TARGS.  */
      explicit_targs = INNERMOST_TEMPLATE_ARGS (explicit_targs);
      for (i = NUM_TMPL_ARGS (explicit_targs); i--;)
        TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (explicit_targs, i);
    }

  /* Never do unification on the 'this' parameter.  */
  parms = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (fntype));

  if (return_type && strict == DEDUCE_CALL)
    {
      /* We're deducing for a call to the result of a template conversion
         function.  The parms we really want are in return_type.  */
      if (POINTER_TYPE_P (return_type))
        return_type = TREE_TYPE (return_type);
      parms = TYPE_ARG_TYPES (return_type);
    }
  else if (return_type)
    {
      /* Prepend the desired return type as an extra "argument" so it
         participates in deduction like a parameter.  */
      tree *new_args;

      parms = tree_cons (NULL_TREE, TREE_TYPE (fntype), parms);
      new_args = XALLOCAVEC (tree, nargs + 1);
      new_args[0] = return_type;
      memcpy (new_args + 1, args, nargs * sizeof (tree));
      args = new_args;
      ++nargs;
    }

  /* We allow incomplete unification without an error message here
     because the standard doesn't seem to explicitly prohibit it.  Our
     callers must be ready to deal with unification failures in any
     event.  */

  /* If we aren't explaining yet, push tinst context so we can see where
     any errors (e.g. from class instantiations triggered by instantiation
     of default template arguments) come from.  If we are explaining, this
     context is redundant.  */
  if (!explain_p && !push_tinst_level (fn, targs))
    {
      excessive_deduction_depth = true;
      goto fail;
    }

  /* type_unification_real will pass back any access checks from default
     template argument substitution.  */
  vec<deferred_access_check, va_gc> *checks;
  checks = NULL;

  ok = !type_unification_real (DECL_INNERMOST_TEMPLATE_PARMS (fn),
                               full_targs, parms, args, nargs, /*subr=*/0,
                               strict, flags, &checks, explain_p);
  if (!explain_p)
    pop_tinst_level ();
  if (!ok)
    goto fail;

  /* Now that we have bindings for all of the template arguments,
     ensure that the arguments deduced for the template template
     parameters have compatible template parameter lists.  We cannot
     check this property before we have deduced all template
     arguments, because the template parameter types of a template
     template parameter might depend on prior template parameters
     deduced after the template template parameter.  The following
     ill-formed example illustrates this issue:

       template<typename T, template<T> class C> void f(C<5>, T);

       template<int N> struct X {};

       void g() {
         f(X<5>(), 5l); // error: template argument deduction fails
       }

     The template parameter list of 'C' depends on the template type
     parameter 'T', but 'C' is deduced to 'X' before 'T' is deduced to
     'long'.  Thus, we can't check that 'C' cannot bind to 'X' at the
     time that we deduce 'C'.  */
  if (!template_template_parm_bindings_ok_p
      (DECL_INNERMOST_TEMPLATE_PARMS (fn), targs))
    {
      unify_inconsistent_template_template_parameters (explain_p);
      goto fail;
    }

  /* All is well so far.  Now, check:

     [temp.deduct]

     When all template arguments have been deduced, all uses of
     template parameters in nondeduced contexts are replaced with
     the corresponding deduced argument values.  If the
     substitution results in an invalid type, as described above,
     type deduction fails.  */
  if (!push_tinst_level (fn, targs))
    {
      excessive_deduction_depth = true;
      goto fail;
    }

  /* Also collect access checks from the instantiation.  */
  reopen_deferring_access_checks (checks);

  decl = instantiate_template (fn, targs, complain);

  checks = get_deferred_access_checks ();
  pop_deferring_access_checks ();

  pop_tinst_level ();

  if (decl == error_mark_node)
    goto fail;

  /* Now perform any access checks encountered during substitution.  */
  push_access_scope (decl);
  ok = perform_access_checks (checks, complain);
  pop_access_scope (decl);
  if (!ok)
    goto fail;

  /* If we're looking for an exact match, check that what we got
     is indeed an exact match.  It might not be if some template
     parameters are used in non-deduced contexts.  But don't check
     for an exact match if we have dependent template arguments;
     in that case we're doing partial ordering, and we already know
     that we have two candidates that will provide the actual type.  */
  if (strict == DEDUCE_EXACT
      && !any_dependent_template_arguments_p (targs))
    {
      tree substed = TREE_TYPE (decl);
      unsigned int i;

      tree sarg
        = skip_artificial_parms_for (decl, TYPE_ARG_TYPES (substed));
      if (return_type)
        sarg = tree_cons (NULL_TREE, TREE_TYPE (substed), sarg);
      for (i = 0; i < nargs && sarg; ++i, sarg = TREE_CHAIN (sarg))
        if (!same_type_p (args[i], TREE_VALUE (sarg)))
          {
            unify_type_mismatch (explain_p, args[i], TREE_VALUE (sarg));
            goto fail;
          }
    }

  /* After doing deduction with the inherited constructor, actually
     return an instantiation of the inheriting constructor.  */
  if (orig_fn != fn)
    decl = instantiate_template (orig_fn, targs, complain);

  r = decl;

 fail:
  --deduction_depth;
  if (excessive_deduction_depth)
    {
      if (deduction_depth == 0)
        /* Reset once we're all the way out.  */
        excessive_deduction_depth = false;
    }

  return r;
}

/* Adjust types before performing type deduction, as described in
   [temp.deduct.call] and [temp.deduct.conv].  The rules in these two
   sections are symmetric.  PARM is the type of a function parameter
   or the return type of the conversion function.
   ARG is the type of the argument passed to the call, or the type of the
   value initialized with the result of the conversion function.
   ARG_EXPR is the original argument expression, which may be null.

   Returns a bitmask of additional UNIFY_ALLOW_* flags for this P/A pair;
   *PARM and *ARG may be rewritten in place.  */

static int
maybe_adjust_types_for_deduction (unification_kind_t strict,
				  tree* parm,
				  tree* arg,
				  tree arg_expr)
{
  int result = 0;

  switch (strict)
    {
    case DEDUCE_CALL:
      break;

    case DEDUCE_CONV:
      /* Swap PARM and ARG throughout the remainder of this
	 function; the handling is precisely symmetric since PARM
	 will initialize ARG rather than vice versa.  */
      std::swap (parm, arg);
      break;

    case DEDUCE_EXACT:
      /* Core issue #873: Do the DR606 thing (see below) for these cases,
	 too, but here handle it by stripping the reference from PARM
	 rather than by adding it to ARG.  */
      if (TREE_CODE (*parm) == REFERENCE_TYPE
	  && TYPE_REF_IS_RVALUE (*parm)
	  && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
	  && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
	  && TREE_CODE (*arg) == REFERENCE_TYPE
	  && !TYPE_REF_IS_RVALUE (*arg))
	*parm = TREE_TYPE (*parm);
      /* Nothing else to do in this case.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  if (TREE_CODE (*parm) != REFERENCE_TYPE)
    {
      /* [temp.deduct.call]

	 If P is not a reference type:

	 --If A is an array type, the pointer type produced by the
	 array-to-pointer standard conversion (_conv.array_) is
	 used in place of A for type deduction; otherwise,

	 --If A is a function type, the pointer type produced by
	 the function-to-pointer standard conversion
	 (_conv.func_) is used in place of A for type deduction;
	 otherwise,

	 --If A is a cv-qualified type, the top level
	 cv-qualifiers of A's type are ignored for type
	 deduction.  */
      if (TREE_CODE (*arg) == ARRAY_TYPE)
	*arg = build_pointer_type (TREE_TYPE (*arg));
      else if (TREE_CODE (*arg) == FUNCTION_TYPE)
	*arg = build_pointer_type (*arg);
      else
	*arg = TYPE_MAIN_VARIANT (*arg);
    }

  /* [14.8.2.1/3 temp.deduct.call], "A forwarding reference is an rvalue
     reference to a cv-unqualified template parameter that does not represent
     a template parameter of a class template (during class template argument
     deduction (13.3.1.8)).  If P is a forwarding reference and the argument
     is an lvalue, the type "lvalue reference to A" is used in place of A for
     type deduction.  */
  if (TREE_CODE (*parm) == REFERENCE_TYPE
      && TYPE_REF_IS_RVALUE (*parm)
      && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
      && !TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (*parm))
      && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
      && (arg_expr ? lvalue_p (arg_expr)
	  /* try_one_overload doesn't provide an arg_expr, but
	     functions are always lvalues.  */
	  : TREE_CODE (*arg) == FUNCTION_TYPE))
    *arg = build_reference_type (*arg);

  /* [temp.deduct.call]

     If P is a cv-qualified type, the top level cv-qualifiers
     of P's type are ignored for type deduction.  If P is a
     reference type, the type referred to by P is used for
     type deduction.  */
  *parm = TYPE_MAIN_VARIANT (*parm);
  if (TREE_CODE (*parm) == REFERENCE_TYPE)
    {
      *parm = TREE_TYPE (*parm);
      result |= UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
    }

  /* DR 322.  For conversion deduction, remove a reference type on parm
     too (which has been swapped into ARG).  */
  if (strict == DEDUCE_CONV && TREE_CODE (*arg) == REFERENCE_TYPE)
    *arg = TREE_TYPE (*arg);

  return result;
}

/* Subroutine of unify_one_argument.  PARM is a function parameter of a
   template which does not contain any deducible template parameters; check if
   ARG is a suitable match for it.  STRICT, FLAGS and EXPLAIN_P are as in
   unify_one_argument.
*/ static int check_non_deducible_conversion (tree parm, tree arg, int strict, int flags, bool explain_p) { tree type; if (!TYPE_P (arg)) type = TREE_TYPE (arg); else type = arg; if (same_type_p (parm, type)) return unify_success (explain_p); if (strict == DEDUCE_CONV) { if (can_convert_arg (type, parm, NULL_TREE, flags, explain_p ? tf_warning_or_error : tf_none)) return unify_success (explain_p); } else if (strict != DEDUCE_EXACT) { if (can_convert_arg (parm, type, TYPE_P (arg) ? NULL_TREE : arg, flags, explain_p ? tf_warning_or_error : tf_none)) return unify_success (explain_p); } if (strict == DEDUCE_EXACT) return unify_type_mismatch (explain_p, parm, arg); else return unify_arg_conversion (explain_p, parm, type, arg); } static bool uses_deducible_template_parms (tree type); /* Returns true iff the expression EXPR is one from which a template argument can be deduced. In other words, if it's an undecorated use of a template non-type parameter. */ static bool deducible_expression (tree expr) { /* Strip implicit conversions. */ while (CONVERT_EXPR_P (expr)) expr = TREE_OPERAND (expr, 0); return (TREE_CODE (expr) == TEMPLATE_PARM_INDEX); } /* Returns true iff the array domain DOMAIN uses a template parameter in a deducible way; that is, if it has a max value of <PARM> - 1. */ static bool deducible_array_bound (tree domain) { if (domain == NULL_TREE) return false; tree max = TYPE_MAX_VALUE (domain); if (TREE_CODE (max) != MINUS_EXPR) return false; return deducible_expression (TREE_OPERAND (max, 0)); } /* Returns true iff the template arguments ARGS use a template parameter in a deducible way. 
 */

static bool
deducible_template_args (tree args)
{
  for (int i = 0; i < TREE_VEC_LENGTH (args); ++i)
    {
      bool deducible;
      tree elt = TREE_VEC_ELT (args, i);
      /* An argument pack is deducible iff any of its contained
	 arguments is.  */
      if (ARGUMENT_PACK_P (elt))
	deducible = deducible_template_args (ARGUMENT_PACK_ARGS (elt));
      else
	{
	  if (PACK_EXPANSION_P (elt))
	    elt = PACK_EXPANSION_PATTERN (elt);
	  if (TREE_CODE (elt) == TEMPLATE_TEMPLATE_PARM)
	    deducible = true;
	  else if (TYPE_P (elt))
	    deducible = uses_deducible_template_parms (elt);
	  else
	    deducible = deducible_expression (elt);
	}
      if (deducible)
	return true;
    }
  return false;
}

/* Returns true iff TYPE contains any deducible references to template
   parameters, as per 14.8.2.5.  */

static bool
uses_deducible_template_parms (tree type)
{
  if (PACK_EXPANSION_P (type))
    type = PACK_EXPANSION_PATTERN (type);

  /* T
     cv-list T
     TT<T>
     TT<i>
     TT<> */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;

  /* T*
     T&
     T&& */
  if (POINTER_TYPE_P (type))
    return uses_deducible_template_parms (TREE_TYPE (type));

  /* T[integer-constant ]
     type [i] */
  if (TREE_CODE (type) == ARRAY_TYPE)
    return (uses_deducible_template_parms (TREE_TYPE (type))
	    || deducible_array_bound (TYPE_DOMAIN (type)));

  /* T type ::*
     type T::*
     T T::*
     T (type ::*)()
     type (T::*)()
     type (type ::*)(T)
     type (T::*)(T)
     T (type ::*)(T)
     T (T::*)()
     T (T::*)(T) */
  if (TYPE_PTRMEM_P (type))
    return (uses_deducible_template_parms (TYPE_PTRMEM_CLASS_TYPE (type))
	    || (uses_deducible_template_parms
		(TYPE_PTRMEM_POINTED_TO_TYPE (type))));

  /* template-name <T> (where template-name refers to a class template)
     template-name <i> (where template-name refers to a class template) */
  if (CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    return deducible_template_args (INNERMOST_TEMPLATE_ARGS
				    (CLASSTYPE_TI_ARGS (type)));

  /* type (T)
     T()
     T(T) */
  if (TREE_CODE (type) == FUNCTION_TYPE
      || TREE_CODE (type) == METHOD_TYPE)
    {
      if (uses_deducible_template_parms (TREE_TYPE (type)))
	return true;
      tree parm = TYPE_ARG_TYPES (type);
      /* Skip the implicit object parameter of a method.  */
      if (TREE_CODE (type) == METHOD_TYPE)
	parm = TREE_CHAIN (parm);
      for (; parm; parm = TREE_CHAIN (parm))
	if (uses_deducible_template_parms (TREE_VALUE (parm)))
	  return true;
    }

  return false;
}

/* Subroutine of type_unification_real and unify_pack_expansion to
   handle unification of a single P/A pair.  Parameters are as for
   those functions.  Returns zero on success, nonzero on failure.  */

static int
unify_one_argument (tree tparms, tree targs, tree parm, tree arg,
		    int subr, unification_kind_t strict,
		    bool explain_p)
{
  tree arg_expr = NULL_TREE;
  int arg_strict;

  if (arg == error_mark_node || parm == error_mark_node)
    return unify_invalid (explain_p);
  if (arg == unknown_type_node)
    /* We can't deduce anything from this, but we might get all the
       template args from other function args.  */
    return unify_success (explain_p);

  /* Implicit conversions (Clause 4) will be performed on a function
     argument to convert it to the type of the corresponding function
     parameter if the parameter type contains no template-parameters that
     participate in template argument deduction.  */
  if (strict != DEDUCE_EXACT
      && TYPE_P (parm) && !uses_deducible_template_parms (parm))
    /* For function parameters with no deducible template parameters,
       just return.  We'll check non-dependent conversions later.  */
    return unify_success (explain_p);

  /* Translate the deduction kind into the UNIFY_ALLOW_* strictness
     flags used by unify.  */
  switch (strict)
    {
    case DEDUCE_CALL:
      arg_strict = (UNIFY_ALLOW_OUTER_LEVEL
		    | UNIFY_ALLOW_MORE_CV_QUAL
		    | UNIFY_ALLOW_DERIVED);
      break;

    case DEDUCE_CONV:
      arg_strict = UNIFY_ALLOW_LESS_CV_QUAL;
      break;

    case DEDUCE_EXACT:
      arg_strict = UNIFY_ALLOW_NONE;
      break;

    default:
      gcc_unreachable ();
    }

  /* We only do these transformations if this is the top-level
     parameter_type_list in a call or declaration matching; in other
     situations (nested function declarators, template argument lists) we
     won't be comparing a type to an expression, and we don't do any type
     adjustments.  */
  if (!subr)
    {
      if (!TYPE_P (arg))
	{
	  gcc_assert (TREE_TYPE (arg) != NULL_TREE);
	  if (type_unknown_p (arg))
	    {
	      /* [temp.deduct.type] A template-argument can be
		 deduced from a pointer to function or pointer
		 to member function argument if the set of
		 overloaded functions does not contain function
		 templates and at most one of a set of
		 overloaded functions provides a unique
		 match.  */
	      resolve_overloaded_unification (tparms, targs, parm,
					      arg, strict,
					      arg_strict, explain_p);
	      /* If a unique match was not found, this is a
		 non-deduced context, so we still succeed.  */
	      return unify_success (explain_p);
	    }

	  arg_expr = arg;
	  arg = unlowered_expr_type (arg);
	  if (arg == error_mark_node)
	    return unify_invalid (explain_p);
	}

      arg_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg,
						      arg_expr);
    }
  else
    if ((TYPE_P (parm) || TREE_CODE (parm) == TEMPLATE_DECL)
	!= (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL))
      return unify_template_argument_mismatch (explain_p, parm, arg);

  /* For deduction from an init-list we need the actual list.  */
  if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr))
    arg = arg_expr;
  return unify (tparms, targs, parm, arg, arg_strict, explain_p);
}

/* for_each_template_parm callback that always returns 0.  */

static int
zero_r (tree, void *)
{
  return 0;
}

/* for_each_template_parm any_fn callback to handle deduction of a template
   type argument from the type of an array bound.  */

static int
array_deduction_r (tree t, void *data)
{
  tree_pair_p d = (tree_pair_p)data;
  tree &tparms = d->purpose;
  tree &targs = d->value;

  if (TREE_CODE (t) == ARRAY_TYPE)
    if (tree dom = TYPE_DOMAIN (t))
      if (tree max = TYPE_MAX_VALUE (dom))
	{
	  /* The bound is stored as <PARM> - 1; recover PARM itself.  */
	  if (TREE_CODE (max) == MINUS_EXPR)
	    max = TREE_OPERAND (max, 0);
	  if (TREE_CODE (max) == TEMPLATE_PARM_INDEX)
	    unify (tparms, targs, TREE_TYPE (max), size_type_node,
		   UNIFY_ALLOW_NONE, /*explain*/false);
	}

  /* Keep walking.  */
  return 0;
}

/* Try to deduce any not-yet-deduced template type arguments from the type of
   an array bound.
   This is handled separately from unify because 14.8.2.5 says "The type of
   a type parameter is only deduced from an array bound if it is not
   otherwise deduced."  */

static void
try_array_deduction (tree tparms, tree targs, tree parm)
{
  tree_pair_s data = { tparms, targs };
  hash_set<tree> visited;
  for_each_template_parm (parm, zero_r, &data, &visited,
			  /*nondeduced*/false, array_deduction_r);
}

/* Most parms like fn_type_unification.

   If SUBR is 1, we're being called recursively (to unify the
   arguments of a function or method parameter of a function
   template).

   CHECKS is a pointer to a vector of access checks encountered while
   substituting default template arguments.

   Returns zero on success, nonzero on failure.  The deduction may take
   several passes (driven by SAW_UNDEDUCED and the `again' label below) so
   that default template arguments and dependent non-type parameters are
   handled after everything that can be deduced directly.  */

static int
type_unification_real (tree tparms,
		       tree full_targs,
		       tree xparms,
		       const tree *xargs,
		       unsigned int xnargs,
		       int subr,
		       unification_kind_t strict,
		       int flags,
		       vec<deferred_access_check, va_gc> **checks,
		       bool explain_p)
{
  tree parm, arg;
  int i;
  int ntparms = TREE_VEC_LENGTH (tparms);
  int saw_undeduced = 0;
  tree parms;
  const tree *args;
  unsigned int nargs;
  unsigned int ia;

  gcc_assert (TREE_CODE (tparms) == TREE_VEC);
  gcc_assert (xparms == NULL_TREE || TREE_CODE (xparms) == TREE_LIST);
  gcc_assert (ntparms > 0);

  tree targs = INNERMOST_TEMPLATE_ARGS (full_targs);

  /* Reset the number of non-defaulted template arguments contained
     in TARGS.  */
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs) = NULL_TREE;

 again:
  parms = xparms;
  args = xargs;
  nargs = xnargs;

  /* Walk the parameter and argument lists in parallel, unifying each
     P/A pair.  */
  ia = 0;
  while (parms && parms != void_list_node
	 && ia < nargs)
    {
      parm = TREE_VALUE (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
	  && (!TREE_CHAIN (parms) || TREE_CHAIN (parms) == void_list_node))
	/* For a function parameter pack that occurs at the end of the
	   parameter-declaration-list, the type A of each remaining
	   argument of the call is compared with the type P of the
	   declarator-id of the function parameter pack.  */
	break;

      parms = TREE_CHAIN (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	/* For a function parameter pack that does not occur at the
	   end of the parameter-declaration-list, the type of the
	   parameter pack is a non-deduced context.  */
	continue;

      arg = args[ia];
      ++ia;

      if (unify_one_argument (tparms, full_targs, parm, arg, subr, strict,
			      explain_p))
	return 1;
    }

  if (parms
      && parms != void_list_node
      && TREE_CODE (TREE_VALUE (parms)) == TYPE_PACK_EXPANSION)
    {
      /* Unify the remaining arguments with the pack expansion type.  */
      tree argvec;
      tree parmvec = make_tree_vec (1);

      /* Allocate a TREE_VEC and copy in all of the arguments */
      argvec = make_tree_vec (nargs - ia);
      for (i = 0; ia < nargs; ++ia, ++i)
	TREE_VEC_ELT (argvec, i) = args[ia];

      /* Copy the parameter into parmvec.  */
      TREE_VEC_ELT (parmvec, 0) = TREE_VALUE (parms);
      if (unify_pack_expansion (tparms, full_targs, parmvec, argvec, strict,
				/*subr=*/subr, explain_p))
	return 1;

      /* Advance to the end of the list of parameters.  */
      parms = TREE_CHAIN (parms);
    }

  /* Fail if we've reached the end of the parm list, and more args
     are present, and the parm list isn't variadic.  */
  if (ia < nargs && parms == void_list_node)
    return unify_too_many_arguments (explain_p, nargs, ia);
  /* Fail if parms are left and they don't have default values and
     they aren't all deduced as empty packs (c++/57397).  This is
     consistent with sufficient_parms_p.  */
  if (parms && parms != void_list_node
      && TREE_PURPOSE (parms) == NULL_TREE)
    {
      unsigned int count = nargs;
      tree p = parms;
      bool type_pack_p;
      do
	{
	  type_pack_p = TREE_CODE (TREE_VALUE (p)) == TYPE_PACK_EXPANSION;
	  if (!type_pack_p)
	    count++;
	  p = TREE_CHAIN (p);
	}
      while (p && p != void_list_node);
      if (count != nargs)
	return unify_too_few_arguments (explain_p, ia, count,
					type_pack_p);
    }

  if (!subr)
    {
      tsubst_flags_t complain = (explain_p
				 ? tf_warning_or_error
				 : tf_none);
      bool tried_array_deduction = (cxx_dialect < cxx17);

      for (i = 0; i < ntparms; i++)
	{
	  tree targ = TREE_VEC_ELT (targs, i);
	  tree tparm = TREE_VEC_ELT (tparms, i);

	  /* Clear the "incomplete" flags on all argument packs now so that
	     substituting them into later default arguments works.  */
	  if (targ && ARGUMENT_PACK_P (targ))
	    {
	      ARGUMENT_PACK_INCOMPLETE_P (targ) = 0;
	      ARGUMENT_PACK_EXPLICIT_ARGS (targ) = NULL_TREE;
	    }

	  if (targ || tparm == error_mark_node)
	    continue;
	  tparm = TREE_VALUE (tparm);

	  if (TREE_CODE (tparm) == TYPE_DECL
	      && !tried_array_deduction)
	    {
	      try_array_deduction (tparms, targs, xparms);
	      tried_array_deduction = true;
	      if (TREE_VEC_ELT (targs, i))
		continue;
	    }

	  /* If this is an undeduced nontype parameter that depends on
	     a type parameter, try another pass; its type may have been
	     deduced from a later argument than the one from which
	     this parameter can be deduced.  */
	  if (TREE_CODE (tparm) == PARM_DECL
	      && uses_template_parms (TREE_TYPE (tparm))
	      && saw_undeduced < 2)
	    {
	      saw_undeduced = 1;
	      continue;
	    }

	  /* Core issue #226 (C++0x) [temp.deduct]:

	     If a template argument has not been deduced, its
	     default template argument, if any, is used.

	     When we are in C++98 mode, TREE_PURPOSE will either
	     be NULL_TREE or ERROR_MARK_NODE, so we do not need
	     to explicitly check cxx_dialect here.  */
	  if (TREE_PURPOSE (TREE_VEC_ELT (tparms, i)))
	    /* OK, there is a default argument.  Wait until after the
	       conversion check to do substitution.  */
	    continue;

	  /* If the type parameter is a parameter pack, then it will
	     be deduced to an empty parameter pack.  */
	  if (template_parameter_pack_p (tparm))
	    {
	      tree arg;

	      if (TREE_CODE (tparm) == TEMPLATE_PARM_INDEX)
		{
		  arg = make_node (NONTYPE_ARGUMENT_PACK);
		  TREE_CONSTANT (arg) = 1;
		}
	      else
		arg = cxx_make_type (TYPE_ARGUMENT_PACK);

	      SET_ARGUMENT_PACK_ARGS (arg, make_tree_vec (0));

	      TREE_VEC_ELT (targs, i) = arg;
	      continue;
	    }

	  return unify_parameter_deduction_failure (explain_p, tparm);
	}

      /* DR 1391: All parameters have args, now check non-dependent parms for
	 convertibility.  */
      if (saw_undeduced < 2)
	for (ia = 0, parms = xparms, args = xargs, nargs = xnargs;
	     parms && parms != void_list_node && ia < nargs; )
	  {
	    parm = TREE_VALUE (parms);

	    if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
		&& (!TREE_CHAIN (parms)
		    || TREE_CHAIN (parms) == void_list_node))
	      /* For a function parameter pack that occurs at the end of the
		 parameter-declaration-list, the type A of each remaining
		 argument of the call is compared with the type P of the
		 declarator-id of the function parameter pack.  */
	      break;

	    parms = TREE_CHAIN (parms);

	    if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	      /* For a function parameter pack that does not occur at the
		 end of the parameter-declaration-list, the type of the
		 parameter pack is a non-deduced context.  */
	      continue;

	    arg = args[ia];
	    ++ia;

	    if (uses_template_parms (parm))
	      continue;
	    if (check_non_deducible_conversion (parm, arg, strict, flags,
						explain_p))
	      return 1;
	  }

      /* Now substitute into the default template arguments.  */
      for (i = 0; i < ntparms; i++)
	{
	  tree targ = TREE_VEC_ELT (targs, i);
	  tree tparm = TREE_VEC_ELT (tparms, i);

	  if (targ || tparm == error_mark_node)
	    continue;
	  tree parm = TREE_VALUE (tparm);
	  tree arg = TREE_PURPOSE (tparm);
	  reopen_deferring_access_checks (*checks);
	  location_t save_loc = input_location;
	  if (DECL_P (parm))
	    input_location = DECL_SOURCE_LOCATION (parm);

	  if (saw_undeduced == 1)
	    ++processing_template_decl;

	  if (saw_undeduced == 1
	      && TREE_CODE (parm) == PARM_DECL
	      && uses_template_parms (TREE_TYPE (parm)))
	    {
	      /* The type of this non-type parameter depends on undeduced
		 parameters.  Don't try to use its default argument yet,
		 but do check whether the arguments we already have cause
		 substitution failure, so that that happens before we try
		 later default arguments (78489).  */
	      tree type = tsubst (TREE_TYPE (parm), full_targs, complain,
				  NULL_TREE);
	      if (type == error_mark_node)
		arg = error_mark_node;
	      else
		arg = NULL_TREE;
	    }
	  else
	    {
	      arg = tsubst_template_arg (arg, full_targs, complain, NULL_TREE);

	      if (!uses_template_parms (arg))
		arg = convert_template_argument (parm, arg, full_targs,
						 complain, i, NULL_TREE);
	      else if (saw_undeduced == 1)
		arg = NULL_TREE;
	      else
		arg = error_mark_node;
	    }

	  if (saw_undeduced == 1)
	    --processing_template_decl;

	  input_location = save_loc;
	  *checks = get_deferred_access_checks ();
	  pop_deferring_access_checks ();

	  if (arg == error_mark_node)
	    return 1;
	  else if (arg)
	    {
	      TREE_VEC_ELT (targs, i) = arg;
	      /* The position of the first default template argument,
		 is also the number of non-defaulted arguments in TARGS.
		 Record that.  */
	      if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
		SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, i);
	    }
	}

      if (saw_undeduced++ == 1)
	goto again;
    }

  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, TREE_VEC_LENGTH (targs));

  return unify_success (explain_p);
}

/* Subroutine of type_unification_real.  Args are like the variables
   at the call site.
   ARG is an overloaded function (or template-id); we try deducing
   template args from each of the overloads, and if only one succeeds,
   we go with that.  Modifies TARGS and returns true on success.  */

static bool
resolve_overloaded_unification (tree tparms,
				tree targs,
				tree parm,
				tree arg,
				unification_kind_t strict,
				int sub_strict,
				bool explain_p)
{
  /* Deduce into a scratch vector first; TARGS is only updated if
     exactly one overload matches.  */
  tree tempargs = copy_node (targs);
  int good = 0;
  tree goodfn = NULL_TREE;
  bool addr_p;

  if (TREE_CODE (arg) == ADDR_EXPR)
    {
      arg = TREE_OPERAND (arg, 0);
      addr_p = true;
    }
  else
    addr_p = false;

  if (TREE_CODE (arg) == COMPONENT_REF)
    /* Handle `&x' where `x' is some static or non-static member
       function name.  */
    arg = TREE_OPERAND (arg, 1);

  if (TREE_CODE (arg) == OFFSET_REF)
    arg = TREE_OPERAND (arg, 1);

  /* Strip baselink information.  */
  if (BASELINK_P (arg))
    arg = BASELINK_FUNCTIONS (arg);

  if (TREE_CODE (arg) == TEMPLATE_ID_EXPR)
    {
      /* If we got some explicit template args, we need to plug them into
	 the affected templates before we try to unify, in case the
	 explicit args will completely resolve the templates in question.  */

      int ok = 0;
      tree expl_subargs = TREE_OPERAND (arg, 1);
      arg = TREE_OPERAND (arg, 0);

      for (lkp_iterator iter (arg); iter; ++iter)
	{
	  tree fn = *iter;
	  tree subargs, elem;

	  if (TREE_CODE (fn) != TEMPLATE_DECL)
	    continue;

	  subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
					   expl_subargs, NULL_TREE, tf_none,
					   /*require_all_args=*/true,
					   /*use_default_args=*/true);
	  if (subargs != error_mark_node
	      && !any_dependent_template_arguments_p (subargs))
	    {
	      elem = TREE_TYPE (instantiate_template (fn, subargs, tf_none));
	      if (try_one_overload (tparms, targs, tempargs, parm,
				    elem, strict, sub_strict, addr_p, explain_p)
		  && (!goodfn || !same_type_p (goodfn, elem)))
		{
		  goodfn = elem;
		  ++good;
		}
	    }
	  else if (subargs)
	    ++ok;
	}
      /* If no templates (or more than one) are fully resolved by the
	 explicit arguments, this template-id is a non-deduced context; it
	 could still be OK if we deduce all template arguments for the
	 enclosing call through other arguments.  */
      if (good != 1)
	good = ok;
    }
  else if (TREE_CODE (arg) != OVERLOAD
	   && TREE_CODE (arg) != FUNCTION_DECL)
    /* If ARG is, for example, "(0, &f)" then its type will be unknown
       -- but the deduction does not succeed because the expression is
       not just the function on its own.  */
    return false;
  else
    for (lkp_iterator iter (arg); iter; ++iter)
      {
	tree fn = *iter;
	if (try_one_overload (tparms, targs, tempargs, parm, TREE_TYPE (fn),
			      strict, sub_strict, addr_p, explain_p)
	    && (!goodfn || !decls_match (goodfn, fn)))
	  {
	    goodfn = fn;
	    ++good;
	  }
      }

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if we found multiple possibilities, we return success but don't
     deduce anything.  */

  if (good == 1)
    {
      int i = TREE_VEC_LENGTH (targs);
      for (; i--; )
	if (TREE_VEC_ELT (tempargs, i))
	  {
	    tree old = TREE_VEC_ELT (targs, i);
	    tree new_ = TREE_VEC_ELT (tempargs, i);
	    if (new_ && old && ARGUMENT_PACK_P (old)
		&& ARGUMENT_PACK_EXPLICIT_ARGS (old))
	      /* Don't forget explicit template arguments in a pack.  */
	      ARGUMENT_PACK_EXPLICIT_ARGS (new_)
		= ARGUMENT_PACK_EXPLICIT_ARGS (old);
	    TREE_VEC_ELT (targs, i) = new_;
	  }
    }
  if (good)
    return true;

  return false;
}

/* Core DR 115: In contexts where deduction is done and fails, or in
   contexts where deduction is not done, if a template argument list is
   specified and it, along with any default template arguments, identifies
   a single function template specialization, then the template-id is an
   lvalue for the function template specialization.  */

tree
resolve_nondeduced_context (tree orig_expr, tsubst_flags_t complain)
{
  tree expr, offset, baselink;
  bool addr;

  if (!type_unknown_p (orig_expr))
    return orig_expr;

  /* Peel off and remember any wrappers (&, offset-ref, baselink) so they
     can be rebuilt around the resolved function below.  */
  expr = orig_expr;
  addr = false;
  offset = NULL_TREE;
  baselink = NULL_TREE;

  if (TREE_CODE (expr) == ADDR_EXPR)
    {
      expr = TREE_OPERAND (expr, 0);
      addr = true;
    }
  if (TREE_CODE (expr) == OFFSET_REF)
    {
      offset = expr;
      expr = TREE_OPERAND (expr, 1);
    }
  if (BASELINK_P (expr))
    {
      baselink = expr;
      expr = BASELINK_FUNCTIONS (expr);
    }

  if (TREE_CODE (expr) == TEMPLATE_ID_EXPR)
    {
      int good = 0;
      tree goodfn = NULL_TREE;

      /* If we got some explicit template args, we need to plug them into
	 the affected templates before we try to unify, in case the
	 explicit args will completely resolve the templates in question.  */

      tree expl_subargs = TREE_OPERAND (expr, 1);
      tree arg = TREE_OPERAND (expr, 0);
      tree badfn = NULL_TREE;
      tree badargs = NULL_TREE;

      for (lkp_iterator iter (arg); iter; ++iter)
	{
	  tree fn = *iter;
	  tree subargs, elem;

	  if (TREE_CODE (fn) != TEMPLATE_DECL)
	    continue;

	  subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
					   expl_subargs, NULL_TREE, tf_none,
					   /*require_all_args=*/true,
					   /*use_default_args=*/true);
	  if (subargs != error_mark_node
	      && !any_dependent_template_arguments_p (subargs))
	    {
	      elem = instantiate_template (fn, subargs, tf_none);
	      if (elem == error_mark_node)
		{
		  badfn = fn;
		  badargs = subargs;
		}
	      else if (elem && (!goodfn || !decls_match (goodfn, elem)))
		{
		  goodfn = elem;
		  ++good;
		}
	    }
	}
      if (good == 1)
	{
	  mark_used (goodfn);
	  expr = goodfn;
	  if (baselink)
	    expr = build_baselink (BASELINK_BINFO (baselink),
				   BASELINK_ACCESS_BINFO (baselink),
				   expr, BASELINK_OPTYPE (baselink));
	  if (offset)
	    {
	      tree base
		= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (offset, 0)));
	      expr = build_offset_ref (base, expr, addr, complain);
	    }
	  if (addr)
	    expr = cp_build_addr_expr (expr, complain);
	  return expr;
	}
      else if (good == 0 && badargs && (complain & tf_error))
	/* There were no good options and at least one bad one, so let the
	   user know what the problem is.  */
	instantiate_template (badfn, badargs, complain);
    }
  return orig_expr;
}

/* Subroutine of resolve_overloaded_unification; does deduction for a single
   overload.  Fills TARGS with any deduced arguments, or error_mark_node if
   different overloads deduce different arguments for a given parm.
   ADDR_P is true if the expression for which deduction is being performed
   was of the form "& fn" rather than simply "fn".

   Returns 1 on success.
 */

static int
try_one_overload (tree tparms,
		  tree orig_targs,
		  tree targs,
		  tree parm,
		  tree arg,
		  unification_kind_t strict,
		  int sub_strict,
		  bool addr_p,
		  bool explain_p)
{
  int nargs;
  tree tempargs;
  int i;

  if (arg == error_mark_node)
    return 0;

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if this is a template, just return success.  */

  if (uses_template_parms (arg))
    return 1;

  if (TREE_CODE (arg) == METHOD_TYPE)
    arg = build_ptrmemfunc_type (build_pointer_type (arg));
  else if (addr_p)
    arg = build_pointer_type (arg);

  sub_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg, NULL);

  /* We don't copy orig_targs for this because if we have already deduced
     some template args from previous args, unify would complain when we
     try to deduce a template parameter for the same argument, even though
     there isn't really a conflict.  */
  nargs = TREE_VEC_LENGTH (targs);
  tempargs = make_tree_vec (nargs);

  if (unify (tparms, tempargs, parm, arg, sub_strict, explain_p))
    return 0;

  /* First make sure we didn't deduce anything that conflicts with
     explicitly specified args.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);
      tree oldelt = TREE_VEC_ELT (orig_targs, i);

      if (!elt)
	/*NOP*/;
      else if (uses_template_parms (elt))
	/* Since we're unifying against ourselves, we will fill in
	   template args used in the function parm list with our own
	   template parms.  Discard them.  */
	TREE_VEC_ELT (tempargs, i) = NULL_TREE;
      else if (oldelt && ARGUMENT_PACK_P (oldelt))
	{
	  /* Check that the argument at each index of the deduced argument
	     pack is equivalent to the corresponding explicitly specified
	     argument.  We may have deduced more arguments than were
	     explicitly specified, and that's OK.  */

	  /* We used to assert ARGUMENT_PACK_INCOMPLETE_P (oldelt) here, but
	     that's wrong if we deduce the same argument pack from multiple
	     function arguments: it's only incomplete the first time.  */

	  tree explicit_pack = ARGUMENT_PACK_ARGS (oldelt);
	  tree deduced_pack = ARGUMENT_PACK_ARGS (elt);

	  if (TREE_VEC_LENGTH (deduced_pack)
	      < TREE_VEC_LENGTH (explicit_pack))
	    return 0;

	  for (int j = 0; j < TREE_VEC_LENGTH (explicit_pack); j++)
	    if (!template_args_equal (TREE_VEC_ELT (explicit_pack, j),
				      TREE_VEC_ELT (deduced_pack, j)))
	      return 0;
	}
      else if (oldelt && !template_args_equal (oldelt, elt))
	return 0;
    }

  /* No conflicts: commit the scratch deductions into TARGS.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);

      if (elt)
	TREE_VEC_ELT (targs, i) = elt;
    }

  return 1;
}

/* PARM is a template class (perhaps with unbound template
   parameters).  ARG is a fully instantiated type.  If ARG can be
   bound to PARM, return ARG, otherwise return NULL_TREE.  TPARMS and
   TARGS are as for unify.  */

static tree
try_class_unification (tree tparms, tree targs, tree parm, tree arg,
		       bool explain_p)
{
  tree copy_of_targs;

  if (!CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
    return NULL_TREE;
  else if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    /* Matches anything.  */;
  else if (most_general_template (CLASSTYPE_TI_TEMPLATE (arg))
	   != most_general_template (CLASSTYPE_TI_TEMPLATE (parm)))
    return NULL_TREE;

  /* We need to make a new template argument vector for the call to
     unify.  If we used TARGS, we'd clutter it up with the result of
     the attempted unification, even if this class didn't work out.
     We also don't want to commit ourselves to all the unifications
     we've already done, since unification is supposed to be done on
     an argument-by-argument basis.  In other words, consider the
     following pathological case:

       template <int I, int J, int K>
       struct S {};

       template <int I, int J>
       struct S<I, J, 2> : public S<I, I, I>, S<J, J, J> {};

       template <int I, int J, int K>
       void f(S<I, J, K>, S<I, I, I>);

       void g() {
	 S<0, 0, 0> s0;
	 S<0, 1, 2> s2;

	 f(s0, s2);
       }

     Now, by the time we consider the unification involving `s2', we
     already know that we must have `f<0, 0, 0>'.  But, even though
     `S<0, 1, 2>' is derived from `S<0, 0, 0>', the code is invalid
     because there are two ways to unify base classes of S<0, 1, 2>
     with S<I, I, I>.  If we kept the already deduced knowledge, we
     would reject the possibility I=1.  */
  copy_of_targs = make_tree_vec (TREE_VEC_LENGTH (targs));

  if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      if (unify_bound_ttp_args (tparms, copy_of_targs, parm, arg, explain_p))
	return NULL_TREE;
      return arg;
    }

  /* If unification failed, we're done.  */
  if (unify (tparms, copy_of_targs, CLASSTYPE_TI_ARGS (parm),
	     CLASSTYPE_TI_ARGS (arg), UNIFY_ALLOW_NONE, explain_p))
    return NULL_TREE;

  return arg;
}

/* Given a template type PARM and a class type ARG, find the unique
   base type in ARG that is an instance of PARM.  We do not examine
   ARG itself; only its base-classes.  If there is not exactly one
   appropriate base class, return NULL_TREE.  PARM may be the type of
   a partial specialization, as well as a plain template type.  Used
   by unify.  */

static enum template_base_result
get_template_base (tree tparms, tree targs, tree parm, tree arg,
		   bool explain_p, tree *result)
{
  tree rval = NULL_TREE;
  tree binfo;

  gcc_assert (RECORD_OR_UNION_CODE_P (TREE_CODE (arg)));

  binfo = TYPE_BINFO (complete_type (arg));
  if (!binfo)
    {
      /* The type could not be completed.  */
      *result = NULL_TREE;
      return tbr_incomplete_type;
    }

  /* Walk in inheritance graph order.  The search order is not
     important, and this avoids multiple walks of virtual bases.  */
  for (binfo = TREE_CHAIN (binfo); binfo; binfo = TREE_CHAIN (binfo))
    {
      tree r = try_class_unification (tparms, targs, parm,
				      BINFO_TYPE (binfo), explain_p);

      if (r)
	{
	  /* If there is more than one satisfactory baseclass, then:

	       [temp.deduct.call]

	      If they yield more than one possible deduced A, the type
	      deduction fails.

	     applies.  */
	  if (rval && !same_type_p (r, rval))
	    {
	      *result = NULL_TREE;
	      return tbr_ambiguous_baseclass;
	    }

	  rval = r;
	}
    }

  *result = rval;
  return tbr_success;
}

/* Returns the level of DECL, which declares a template parameter.  */

static int
template_decl_level (tree decl)
{
  switch (TREE_CODE (decl))
    {
    case TYPE_DECL:
    case TEMPLATE_DECL:
      return TEMPLATE_TYPE_LEVEL (TREE_TYPE (decl));

    case PARM_DECL:
      return TEMPLATE_PARM_LEVEL (DECL_INITIAL (decl));

    default:
      gcc_unreachable ();
    }
  return 0;
}

/* Decide whether ARG can be unified with PARM, considering only the
   cv-qualifiers of each type, given STRICT as documented for unify.
   Returns nonzero iff the unification is OK on that basis.  */

static int
check_cv_quals_for_unify (int strict, tree arg, tree parm)
{
  int arg_quals = cp_type_quals (arg);
  int parm_quals = cp_type_quals (parm);

  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      && !(strict & UNIFY_ALLOW_OUTER_MORE_CV_QUAL))
    {
      /*  Although a CVR qualifier is ignored when being applied to a
	  substituted template parameter ([8.3.2]/1 for example), that
	  does not allow us to unify "const T" with "int&" because both
	  types are not of the form "cv-list T" [14.8.2.5 temp.deduct.type].
	  It is ok when we're allowing additional CV qualifiers
	  at the outer level [14.8.2.1]/3,1st bullet.  */
      if ((TREE_CODE (arg) == REFERENCE_TYPE
	   || TREE_CODE (arg) == FUNCTION_TYPE
	   || TREE_CODE (arg) == METHOD_TYPE)
	  && (parm_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)))
	return 0;

      if ((!POINTER_TYPE_P (arg) && TREE_CODE (arg) != TEMPLATE_TYPE_PARM)
	  && (parm_quals & TYPE_QUAL_RESTRICT))
	return 0;
    }

  if (!(strict & (UNIFY_ALLOW_MORE_CV_QUAL | UNIFY_ALLOW_OUTER_MORE_CV_QUAL))
      && (arg_quals & parm_quals) != parm_quals)
    return 0;

  if (!(strict & (UNIFY_ALLOW_LESS_CV_QUAL | UNIFY_ALLOW_OUTER_LESS_CV_QUAL))
      && (parm_quals & arg_quals) != arg_quals)
    return 0;

  return 1;
}

/* Determines the LEVEL and INDEX for the template parameter PARM.  */

void
template_parm_level_and_index (tree parm, int* level, int* index)
{
  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      *index = TEMPLATE_TYPE_IDX (parm);
      *level = TEMPLATE_TYPE_LEVEL (parm);
    }
  else
    {
      *index = TEMPLATE_PARM_IDX (parm);
      *level = TEMPLATE_PARM_LEVEL (parm);
    }
}

/* Recursively unify P against A under strictness S, propagating failure
   (nonzero) out of the enclosing function immediately.  */
#define RECUR_AND_CHECK_FAILURE(TP, TA, P, A, S, EP)			\
  do {									\
    if (unify (TP, TA, P, A, S, EP))					\
      return 1;								\
  } while (0)

/* Unifies the remaining arguments in PACKED_ARGS with the pack
   expansion at the end of PACKED_PARMS.  Returns 0 if the type
   deduction succeeds, 1 otherwise.  STRICT is the same as in
   fn_type_unification.  CALL_ARGS_P is true iff PACKED_ARGS is actually a
   function call argument list.  We'll need to adjust the arguments to make
   them types.  SUBR tells us if this is from a recursive call to
   type_unification_real, or for comparing two template argument
   lists.
*/

static int
unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
		      tree packed_args, unification_kind_t strict,
		      bool subr, bool explain_p)
{
  tree parm
    = TREE_VEC_ELT (packed_parms, TREE_VEC_LENGTH (packed_parms) - 1);
  tree pattern = PACK_EXPANSION_PATTERN (parm);
  tree pack, packs = NULL_TREE;
  /* START is the index of the pack expansion; everything before it in
     PACKED_PARMS has already been unified by the caller.  */
  int i, start = TREE_VEC_LENGTH (packed_parms) - 1;

  /* Add in any args remembered from an earlier partial instantiation.  */
  targs = add_to_template_args (PACK_EXPANSION_EXTRA_ARGS (parm), targs);
  int levels = TMPL_ARGS_DEPTH (targs);

  packed_args = expand_template_argument_pack (packed_args);

  int len = TREE_VEC_LENGTH (packed_args);

  /* Determine the parameter packs we will be deducing from the
     pattern, and record their current deductions.  */
  for (pack = PACK_EXPANSION_PARAMETER_PACKS (parm);
       pack; pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      int idx, level;

      /* Only template parameter packs can be deduced, not e.g. function
	 parameter packs or __bases or __integer_pack.  */
      if (!TEMPLATE_PARM_P (parm_pack))
	continue;

      /* Determine the index and level of this parameter pack.  */
      template_parm_level_and_index (parm_pack, &level, &idx);
      if (level < levels)
	continue;

      /* Keep track of the parameter packs and their corresponding
	 argument packs.  The TREE_TYPE of each TREE_LIST node is
	 (ab)used as a scratch TREE_VEC collecting the per-argument
	 deductions for that pack; it is read back below.  */
      packs = tree_cons (parm_pack, TMPL_ARG (targs, level, idx), packs);
      TREE_TYPE (packs) = make_tree_vec (len - start);
    }

  /* Loop through all of the arguments that have not yet been
     unified and unify each with the pattern.  */
  for (i = start; i < len; i++)
    {
      tree parm;
      bool any_explicit = false;
      tree arg = TREE_VEC_ELT (packed_args, i);

      /* For each parameter pack, set its TMPL_ARG to either NULL_TREE
	 or the element of its argument pack at the current index if
	 this argument was explicitly specified.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  tree arg, pargs;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);

	  arg = NULL_TREE;
	  if (TREE_VALUE (pack)
	      && (pargs = ARGUMENT_PACK_EXPLICIT_ARGS (TREE_VALUE (pack)))
	      && (i - start < TREE_VEC_LENGTH (pargs)))
	    {
	      any_explicit = true;
	      arg = TREE_VEC_ELT (pargs, i - start);
	    }
	  TMPL_ARG (targs, level, idx) = arg;
	}

      /* If we had explicit template arguments, substitute them into the
	 pattern before deduction.  */
      if (any_explicit)
	{
	  /* Some arguments might still be unspecified or dependent.  */
	  bool dependent;
	  ++processing_template_decl;
	  dependent = any_dependent_template_arguments_p (targs);
	  if (!dependent)
	    --processing_template_decl;
	  parm = tsubst (pattern, targs,
			 explain_p ? tf_warning_or_error : tf_none,
			 NULL_TREE);
	  /* Balance the increment above; it was only kept while
	     substituting a still-dependent pattern.  */
	  if (dependent)
	    --processing_template_decl;
	  if (parm == error_mark_node)
	    return 1;
	}
      else
	parm = pattern;

      /* Unify the pattern with the current argument.  */
      if (unify_one_argument (tparms, targs, parm, arg, subr, strict,
			      explain_p))
	return 1;

      /* For each parameter pack, collect the deduced value.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);

	  TREE_VEC_ELT (TREE_TYPE (pack), i - start) =
	    TMPL_ARG (targs, level, idx);
	}
    }

  /* Verify that the results of unification with the parameter packs
     produce results consistent with what we've seen before, and make
     the deduced argument packs available.  */
  for (pack = packs; pack; pack = TREE_CHAIN (pack))
    {
      tree old_pack = TREE_VALUE (pack);
      tree new_args = TREE_TYPE (pack);
      int i, len = TREE_VEC_LENGTH (new_args);
      int idx, level;
      bool nondeduced_p = false;

      /* By default keep the original deduced argument pack.
	 If necessary, more specific code is going to update the
	 resulting deduced argument later down in this function.  */
      template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);
      TMPL_ARG (targs, level, idx) = old_pack;

      /* If NEW_ARGS contains any NULL_TREE entries, we didn't
	 actually deduce anything.  */
      for (i = 0; i < len && !nondeduced_p; ++i)
	if (TREE_VEC_ELT (new_args, i) == NULL_TREE)
	  nondeduced_p = true;
      if (nondeduced_p)
	continue;

      if (old_pack && ARGUMENT_PACK_INCOMPLETE_P (old_pack))
	{
	  /* If we had fewer function args than explicit template args,
	     just use the explicits.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);
	  int explicit_len = TREE_VEC_LENGTH (explicit_args);
	  if (len < explicit_len)
	    new_args = explicit_args;
	}

      if (!old_pack)
	{
	  tree result;
	  /* Build the deduced *_ARGUMENT_PACK.  */
	  if (TREE_CODE (TREE_PURPOSE (pack)) == TEMPLATE_PARM_INDEX)
	    {
	      result = make_node (NONTYPE_ARGUMENT_PACK);
	      TREE_CONSTANT (result) = 1;
	    }
	  else
	    result = cxx_make_type (TYPE_ARGUMENT_PACK);

	  SET_ARGUMENT_PACK_ARGS (result, new_args);

	  /* Note the deduced argument packs for this parameter
	     pack.  */
	  TMPL_ARG (targs, level, idx) = result;
	}
      else if (ARGUMENT_PACK_INCOMPLETE_P (old_pack)
	       && (ARGUMENT_PACK_ARGS (old_pack)
		   == ARGUMENT_PACK_EXPLICIT_ARGS (old_pack)))
	{
	  /* We only had the explicitly-provided arguments before, but
	     now we have a complete set of arguments.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);

	  SET_ARGUMENT_PACK_ARGS (old_pack, new_args);
	  ARGUMENT_PACK_INCOMPLETE_P (old_pack) = 1;
	  ARGUMENT_PACK_EXPLICIT_ARGS (old_pack) = explicit_args;
	}
      else
	{
	  tree bad_old_arg = NULL_TREE, bad_new_arg = NULL_TREE;
	  tree old_args = ARGUMENT_PACK_ARGS (old_pack);

	  if (!comp_template_args (old_args, new_args,
				   &bad_old_arg, &bad_new_arg))
	    /* Inconsistent unification of this parameter pack.  */
	    return unify_parameter_pack_inconsistent (explain_p,
						      bad_old_arg,
						      bad_new_arg);
	}
    }

  return unify_success (explain_p);
}

/* Handle unification of the domain of an array.  PARM_DOM and ARG_DOM
   are INTEGER_TYPEs representing the TYPE_DOMAIN of ARRAY_TYPEs.
The other parameters and return value are as for unify.  */

static int
unify_array_domain (tree tparms, tree targs,
		    tree parm_dom, tree arg_dom,
		    bool explain_p)
{
  tree parm_max;
  tree arg_max;
  bool parm_cst;
  bool arg_cst;

  /* Our representation of array types uses "N - 1" as the
     TYPE_MAX_VALUE for an array with "N" elements, if "N" is
     not an integer constant.  We cannot unify arbitrarily
     complex expressions, so we eliminate the MINUS_EXPRs
     here.  */
  parm_max = TYPE_MAX_VALUE (parm_dom);
  parm_cst = TREE_CODE (parm_max) == INTEGER_CST;
  if (!parm_cst)
    {
      gcc_assert (TREE_CODE (parm_max) == MINUS_EXPR);
      parm_max = TREE_OPERAND (parm_max, 0);
    }
  arg_max = TYPE_MAX_VALUE (arg_dom);
  arg_cst = TREE_CODE (arg_max) == INTEGER_CST;
  if (!arg_cst)
    {
      /* The ARG_MAX may not be a simple MINUS_EXPR, if we are
	 trying to unify the type of a variable with the type
	 of a template parameter.  For example:

	   template <unsigned int N>
	   void f (char (&) [N]);
	   int g();
	   void h(int i) {
	     char a[g(i)];
	     f(a);
	   }

	 Here, the type of the ARG will be "int [g(i)]", and
	 may be a SAVE_EXPR, etc.  */
      if (TREE_CODE (arg_max) != MINUS_EXPR)
	return unify_vla_arg (explain_p, arg_dom);
      arg_max = TREE_OPERAND (arg_max, 0);
    }

  /* If only one of the bounds used a MINUS_EXPR, compensate
     by adding one to the other bound, so both sides denote the
     same quantity (the element count) when compared below.  */
  if (parm_cst && !arg_cst)
    parm_max = fold_build2_loc (input_location, PLUS_EXPR,
				integer_type_node,
				parm_max,
				integer_one_node);
  else if (arg_cst && !parm_cst)
    arg_max = fold_build2_loc (input_location, PLUS_EXPR,
			       integer_type_node,
			       arg_max,
			       integer_one_node);

  return unify (tparms, targs, parm_max, arg_max,
		UNIFY_ALLOW_INTEGER, explain_p);
}

/* Returns whether T, a P or A in unify, is a type, template or expression.
*/

/* Coarse classification used by unify to reject P/A pairs of
   different kinds before dispatching on TREE_CODE.  */
enum pa_kind_t { pa_type, pa_tmpl, pa_expr };

static pa_kind_t
pa_kind (tree t)
{
  /* Classify a pack expansion by its pattern.  */
  if (PACK_EXPANSION_P (t))
    t = PACK_EXPANSION_PATTERN (t);
  if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (t) == UNBOUND_CLASS_TEMPLATE
      || DECL_TYPE_TEMPLATE_P (t))
    return pa_tmpl;
  else if (TYPE_P (t))
    return pa_type;
  else
    return pa_expr;
}

/* Deduce the value of template parameters.  TPARMS is the (innermost)
   set of template parameters to a template.  TARGS is the bindings
   for those template parameters, as determined thus far; TARGS may
   include template arguments for outer levels of template parameters
   as well.  PARM is a parameter to a template function, or a
   subcomponent of that parameter; ARG is the corresponding argument.
   This function attempts to match PARM with ARG in a manner
   consistent with the existing assignments in TARGS.  If more values
   are deduced, then TARGS is updated.

   Returns 0 if the type deduction succeeds, 1 otherwise.  The
   parameter STRICT is a bitwise or of the following flags:

     UNIFY_ALLOW_NONE:
       Require an exact match between PARM and ARG.
     UNIFY_ALLOW_MORE_CV_QUAL:
       Allow the deduced ARG to be more cv-qualified (by qualification
       conversion) than ARG.
     UNIFY_ALLOW_LESS_CV_QUAL:
       Allow the deduced ARG to be less cv-qualified than ARG.
     UNIFY_ALLOW_DERIVED:
       Allow the deduced ARG to be a template base class of ARG,
       or a pointer to a template base class of the type pointed to by
       ARG.
     UNIFY_ALLOW_INTEGER:
       Allow any integral type to be deduced.  See the
       TEMPLATE_PARM_INDEX case for more information.
     UNIFY_ALLOW_OUTER_LEVEL:
       This is the outermost level of a deduction.  Used to determine
       validity of qualification conversions.  A valid qualification
       conversion must have const qualified pointers leading up to the
       inner type which requires additional CV quals, except at the
       outer level, where const is not required [conv.qual].  It would
       be normal to set this flag in addition to setting
       UNIFY_ALLOW_MORE_CV_QUAL.
UNIFY_ALLOW_OUTER_MORE_CV_QUAL:
       This is the outermost level of a deduction, and PARM can be more CV
       qualified at this point.
     UNIFY_ALLOW_OUTER_LESS_CV_QUAL:
       This is the outermost level of a deduction, and PARM can be less CV
       qualified at this point.  */

static int
unify (tree tparms, tree targs, tree parm, tree arg, int strict,
       bool explain_p)
{
  int idx;
  tree targ;
  tree tparm;
  /* Preserve the incoming flags; STRICT itself is stripped of the
     outer-level bits before the switch below recurses.  */
  int strict_in = strict;
  tsubst_flags_t complain = (explain_p
			     ? tf_warning_or_error : tf_none);

  /* I don't think this will do the right thing with respect to types.
     But the only case I've seen it in so far has been array bounds, where
     signedness is the only information lost, and I think that will be
     okay.  */
  while (CONVERT_EXPR_P (parm))
    parm = TREE_OPERAND (parm, 0);

  if (arg == error_mark_node)
    return unify_invalid (explain_p);
  if (arg == unknown_type_node
      || arg == init_list_type_node)
    /* We can't deduce anything from this, but we might get all the
       template args from other function args.  */
    return unify_success (explain_p);

  if (parm == any_targ_node || arg == any_targ_node)
    return unify_success (explain_p);

  /* If PARM uses template parameters, then we can't bail out here,
     even if ARG == PARM, since we won't record unifications for the
     template parameters.  We might need them if we're trying to
     figure out which of two things is more specialized.  */
  if (arg == parm && !uses_template_parms (parm))
    return unify_success (explain_p);

  /* Handle init lists early, so the rest of the function can assume
     we're dealing with a type.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (arg))
    {
      tree elt, elttype;
      unsigned i;
      tree orig_parm = parm;

      /* Replace T with std::initializer_list<T> for deduction.  */
      if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
	  && flag_deduce_init_list)
	parm = listify (parm);

      if (!is_std_init_list (parm)
	  && TREE_CODE (parm) != ARRAY_TYPE)
	/* We can only deduce from an initializer list argument if the
	   parameter is std::initializer_list or an array; otherwise this
	   is a non-deduced context.  */
	return unify_success (explain_p);

      if (TREE_CODE (parm) == ARRAY_TYPE)
	elttype = TREE_TYPE (parm);
      else
	{
	  elttype = TREE_VEC_ELT (CLASSTYPE_TI_ARGS (parm), 0);
	  /* Deduction is defined in terms of a single type, so just punt
	     on the (bizarre) std::initializer_list<T...>.  */
	  if (PACK_EXPANSION_P (elttype))
	    return unify_success (explain_p);
	}

      /* Unify the element type against every element of the list.  */
      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (arg), i, elt)
	{
	  int elt_strict = strict;

	  if (elt == error_mark_node)
	    return unify_invalid (explain_p);

	  if (!BRACE_ENCLOSED_INITIALIZER_P (elt))
	    {
	      tree type = TREE_TYPE (elt);
	      if (type == error_mark_node)
		return unify_invalid (explain_p);
	      /* It should only be possible to get here for a call.  */
	      gcc_assert (elt_strict & UNIFY_ALLOW_OUTER_LEVEL);
	      elt_strict |= maybe_adjust_types_for_deduction
		(DEDUCE_CALL, &elttype, &type, elt);
	      elt = type;
	    }

	  RECUR_AND_CHECK_FAILURE (tparms, targs, elttype, elt, elt_strict,
				   explain_p);
	}

      if (TREE_CODE (parm) == ARRAY_TYPE
	  && deducible_array_bound (TYPE_DOMAIN (parm)))
	{
	  /* Also deduce from the length of the initializer list.  */
	  tree max = size_int (CONSTRUCTOR_NELTS (arg));
	  tree idx = compute_array_index_type (NULL_TREE, max, tf_none);
	  if (idx == error_mark_node)
	    return unify_invalid (explain_p);
	  return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm),
				     idx, explain_p);
	}

      /* If the std::initializer_list<T> deduction worked, replace the
	 deduced A with std::initializer_list<A>.  */
      if (orig_parm != parm)
	{
	  idx = TEMPLATE_TYPE_IDX (orig_parm);
	  targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
	  targ = listify (targ);
	  TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = targ;
	}
      return unify_success (explain_p);
    }

  /* If parm and arg aren't the same kind of thing (template, type, or
     expression), fail early.  */
  if (pa_kind (parm) != pa_kind (arg))
    return unify_invalid (explain_p);

  /* Immediately reject some pairs that won't unify because of
     cv-qualification mismatches.  */
  if (TREE_CODE (arg) == TREE_CODE (parm)
      && TYPE_P (arg)
      /* It is the elements of the array which hold the cv quals of an
	 array type, and the elements might be template type parms.
	 We'll check when we recurse.  */
      && TREE_CODE (arg) != ARRAY_TYPE
      /* We check the cv-qualifiers when unifying with template type
	 parameters below.  We want to allow ARG `const T' to unify with
	 PARM `T' for example, when computing which of two templates
	 is more specialized, for example.  */
      && TREE_CODE (arg) != TEMPLATE_TYPE_PARM
      && !check_cv_quals_for_unify (strict_in, arg, parm))
    return unify_cv_qual_mismatch (explain_p, parm, arg);

  if (!(strict & UNIFY_ALLOW_OUTER_LEVEL)
      && TYPE_P (parm) && !CP_TYPE_CONST_P (parm))
    strict &= ~UNIFY_ALLOW_MORE_CV_QUAL;
  /* The OUTER_* flags apply only at this level; strip them (and the
     derived-to-base allowance) before any recursion below.  */
  strict &= ~UNIFY_ALLOW_OUTER_LEVEL;
  strict &= ~UNIFY_ALLOW_DERIVED;
  strict &= ~UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
  strict &= ~UNIFY_ALLOW_OUTER_LESS_CV_QUAL;

  switch (TREE_CODE (parm))
    {
    case TYPENAME_TYPE:
    case SCOPE_REF:
    case UNBOUND_CLASS_TEMPLATE:
      /* In a type which contains a nested-name-specifier, template
	 argument values cannot be deduced for template parameters used
	 within the nested-name-specifier.  */
      return unify_success (explain_p);

    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));

      if (error_operand_p (tparm))
	return unify_invalid (explain_p);

      if (TEMPLATE_TYPE_LEVEL (parm)
	  != template_decl_level (tparm))
	/* The PARM is not one we're trying to unify.  Just check
	   to see if it matches ARG.  */
	{
	  if (TREE_CODE (arg) == TREE_CODE (parm)
	      && (is_auto (parm) ? is_auto (arg)
		  : same_type_p (parm, arg)))
	    return unify_success (explain_p);
	  else
	    return unify_type_mismatch (explain_p, parm, arg);
	}
      idx = TEMPLATE_TYPE_IDX (parm);
      targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
      tparm = TREE_VALUE (TREE_VEC_ELT (tparms, idx));
      if (error_operand_p (tparm))
	return unify_invalid (explain_p);

      /* Check for mixed types and values.  */
      if ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM
	   && TREE_CODE (tparm) != TYPE_DECL)
	  || (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
	      && TREE_CODE (tparm) != TEMPLATE_DECL))
	gcc_unreachable ();

      if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
	{
	  if ((strict_in & UNIFY_ALLOW_DERIVED)
	      && CLASS_TYPE_P (arg))
	    {
	      /* First try to match ARG directly.  */
	      tree t = try_class_unification (tparms, targs, parm, arg,
					      explain_p);
	      if (!t)
		{
		  /* Otherwise, look for a suitable base of ARG, as
		     below.  */
		  enum template_base_result r;
		  r = get_template_base (tparms, targs, parm, arg,
					 explain_p, &t);
		  if (!t)
		    return unify_no_common_base (explain_p, r, parm, arg);
		  arg = t;
		}
	    }
	  /* ARG must be constructed from a template class or a template
	     template parameter.  */
	  else if (TREE_CODE (arg) != BOUND_TEMPLATE_TEMPLATE_PARM
		   && !CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
	    return unify_template_deduction_failure (explain_p, parm, arg);

	  /* Deduce arguments T, i from TT<T> or TT<i>.  */
	  if (unify_bound_ttp_args (tparms, targs, parm, arg, explain_p))
	    return 1;

	  arg = TYPE_TI_TEMPLATE (arg);

	  /* Fall through to deduce template name.  */
	}

      if (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
	  || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
	{
	  /* Deduce template name TT from TT, TT<>, TT<T> and TT<i>.  */

	  /* Simple cases: Value already set, does match or doesn't.  */
	  if (targ != NULL_TREE && template_args_equal (targ, arg))
	    return unify_success (explain_p);
	  else if (targ)
	    return unify_inconsistency (explain_p, parm, targ, arg);
	}
      else
	{
	  /* If PARM is `const T' and ARG is only `int', we don't have
	     a match unless we are allowing additional qualification.
	     If ARG is `const int' and PARM is just `T' that's OK;
	     that binds `const int' to `T'.  */
	  if (!check_cv_quals_for_unify (strict_in | UNIFY_ALLOW_LESS_CV_QUAL,
					 arg, parm))
	    return unify_cv_qual_mismatch (explain_p, parm, arg);

	  /* Consider the case where ARG is `const volatile int' and
	     PARM is `const T'.  Then, T should be `volatile int'.  */
	  arg = cp_build_qualified_type_real
	    (arg, cp_type_quals (arg) & ~cp_type_quals (parm), tf_none);
	  if (arg == error_mark_node)
	    return unify_invalid (explain_p);

	  /* Simple cases: Value already set, does match or doesn't.  */
	  if (targ != NULL_TREE && same_type_p (targ, arg))
	    return unify_success (explain_p);
	  else if (targ)
	    return unify_inconsistency (explain_p, parm, targ, arg);

	  /* Make sure that ARG is not a variable-sized array.  (Note
	     that were talking about variable-sized arrays (like
	     `int[n]'), rather than arrays of unknown size (like
	     `int[]').)  We'll get very confused by such a type since
	     the bound of the array is not constant, and therefore
	     not mangleable.  Besides, such types are not allowed in
	     ISO C++, so we can do as we please here.  We do allow
	     them for 'auto' deduction, since that isn't ABI-exposed.  */
	  if (!is_auto (parm) && variably_modified_type_p (arg, NULL_TREE))
	    return unify_vla_arg (explain_p, arg);

	  /* Strip typedefs as in convert_template_argument.  */
	  arg = canonicalize_type_argument (arg, tf_none);
	}

      /* If ARG is a parameter pack or an expansion, we cannot unify
	 against it unless PARM is also a parameter pack.  */
      if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg))
	  && !template_parameter_pack_p (parm))
	return unify_parameter_pack_mismatch (explain_p, parm, arg);

      /* If the argument deduction results is a METHOD_TYPE,
	 then there is a problem.
	 METHOD_TYPE doesn't map to any real C++ type the result of
	 the deduction can not be of that type.  */
      if (TREE_CODE (arg) == METHOD_TYPE)
	return unify_method_type_error (explain_p, arg);

      /* Record the deduction.  */
      TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg;
      return unify_success (explain_p);

    case TEMPLATE_PARM_INDEX:
      tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
      if (error_operand_p (tparm))
	return unify_invalid (explain_p);

      if (TEMPLATE_PARM_LEVEL (parm)
	  != template_decl_level (tparm))
	{
	  /* The PARM is not one we're trying to unify.  Just check
	     to see if it matches ARG.  */
	  int result = !(TREE_CODE (arg) == TREE_CODE (parm)
			 && cp_tree_equal (parm, arg));
	  if (result)
	    unify_expression_unequal (explain_p, parm, arg);
	  return result;
	}

      idx = TEMPLATE_PARM_IDX (parm);
      targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);

      if (targ)
	{
	  if ((strict & UNIFY_ALLOW_INTEGER)
	      && TREE_TYPE (targ) && TREE_TYPE (arg)
	      && CP_INTEGRAL_TYPE_P (TREE_TYPE (targ)))
	    /* We're deducing from an array bound, the type doesn't
	       matter.  */
	    arg = fold_convert (TREE_TYPE (targ), arg);
	  int x = !cp_tree_equal (targ, arg);
	  if (x)
	    unify_inconsistency (explain_p, parm, targ, arg);
	  return x;
	}

      /* [temp.deduct.type] If, in the declaration of a function template
	 with a non-type template-parameter, the non-type
	 template-parameter is used in an expression in the function
	 parameter-list and, if the corresponding template-argument is
	 deduced, the template-argument type shall match the type of the
	 template-parameter exactly, except that a template-argument
	 deduced from an array bound may be of any integral type.
	 The non-type parameter might use already deduced type
	 parameters.  */
      tparm = TREE_TYPE (parm);
      if (TEMPLATE_PARM_LEVEL (parm) > TMPL_ARGS_DEPTH (targs))
	/* We don't have enough levels of args to do any substitution.
	   This can happen in the context of -fnew-ttp-matching.  */;
      else
	{
	  ++processing_template_decl;
	  tparm = tsubst (tparm, targs, tf_none, NULL_TREE);
	  --processing_template_decl;

	  if (tree a = type_uses_auto (tparm))
	    {
	      tparm = do_auto_deduction (tparm, arg, a, complain, adc_unify);
	      if (tparm == error_mark_node)
		return 1;
	    }
	}

      if (!TREE_TYPE (arg))
	/* Template-parameter dependent expression.  Just accept it for
	   now.  It will later be processed in
	   convert_template_argument.  */
	;
      else if (same_type_p (non_reference (TREE_TYPE (arg)),
			    non_reference (tparm)))
	/* OK */;
      else if ((strict & UNIFY_ALLOW_INTEGER)
	       && CP_INTEGRAL_TYPE_P (tparm))
	/* Convert the ARG to the type of PARM; the deduced non-type
	   template argument must exactly match the types of the
	   corresponding parameter.  */
	arg = fold (build_nop (tparm, arg));
      else if (uses_template_parms (tparm))
	{
	  /* We haven't deduced the type of this parameter yet.  */
	  if (cxx_dialect >= cxx17
	      /* We deduce from array bounds in try_array_deduction.  */
	      && !(strict & UNIFY_ALLOW_INTEGER))
	    {
	      /* Deduce it from the non-type argument.  */
	      tree atype = TREE_TYPE (arg);
	      RECUR_AND_CHECK_FAILURE (tparms, targs,
				       tparm, atype,
				       UNIFY_ALLOW_NONE, explain_p);
	    }
	  else
	    /* Try again later.  */
	    return unify_success (explain_p);
	}
      else
	return unify_type_mismatch (explain_p, tparm, TREE_TYPE (arg));

      /* If ARG is a parameter pack or an expansion, we cannot unify
	 against it unless PARM is also a parameter pack.  */
      if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg))
	  && !TEMPLATE_PARM_PARAMETER_PACK (parm))
	return unify_parameter_pack_mismatch (explain_p, parm, arg);

      {
	bool removed_attr = false;
	arg = strip_typedefs_expr (arg, &removed_attr);
      }
      /* Record the deduction.  */
      TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg;
      return unify_success (explain_p);

    case PTRMEM_CST:
      {
	/* A pointer-to-member constant can be unified only with
	   another constant.  */
	if (TREE_CODE (arg) != PTRMEM_CST)
	  return unify_ptrmem_cst_mismatch (explain_p, parm, arg);

	/* Just unify the class member.  It would be useless (and possibly
	   wrong, depending on the strict flags) to unify also
	   PTRMEM_CST_CLASS, because we want to be sure that both parm and
	   arg refer to the same variable, even if through different
	   classes.  For instance:

	     struct A { int x; };
	     struct B : A { };

	   Unification of &A::x and &B::x must succeed.  */
	return unify (tparms, targs, PTRMEM_CST_MEMBER (parm),
		      PTRMEM_CST_MEMBER (arg), strict, explain_p);
      }

    case POINTER_TYPE:
      {
	if (!TYPE_PTR_P (arg))
	  return unify_type_mismatch (explain_p, parm, arg);

	/* [temp.deduct.call]

	   A can be another pointer or pointer to member type that can
	   be converted to the deduced A via a qualification
	   conversion (_conv.qual_).

	   We pass down STRICT here rather than UNIFY_ALLOW_NONE.
	   This will allow for additional cv-qualification of the
	   pointed-to types if appropriate.  */

	if (TREE_CODE (TREE_TYPE (arg)) == RECORD_TYPE)
	  /* The derived-to-base conversion only persists through one
	     level of pointers.  */
	  strict |= (strict_in & UNIFY_ALLOW_DERIVED);

	return unify (tparms, targs, TREE_TYPE (parm),
		      TREE_TYPE (arg), strict, explain_p);
      }

    case REFERENCE_TYPE:
      if (TREE_CODE (arg) != REFERENCE_TYPE)
	return unify_type_mismatch (explain_p, parm, arg);
      return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
		    strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p);

    case ARRAY_TYPE:
      if (TREE_CODE (arg) != ARRAY_TYPE)
	return unify_type_mismatch (explain_p, parm, arg);
      /* Both must have a domain, or neither (i.e. both be of unknown
	 bound).  */
      if ((TYPE_DOMAIN (parm) == NULL_TREE)
	  != (TYPE_DOMAIN (arg) == NULL_TREE))
	return unify_type_mismatch (explain_p, parm, arg);
      RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm),
			       TREE_TYPE (arg),
			       strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p);
      if (TYPE_DOMAIN (parm) != NULL_TREE)
	return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm),
				   TYPE_DOMAIN (arg), explain_p);
      return unify_success (explain_p);

    case REAL_TYPE:
    case COMPLEX_TYPE:
    case VECTOR_TYPE:
    case INTEGER_TYPE:
    case BOOLEAN_TYPE:
    case ENUMERAL_TYPE:
    case VOID_TYPE:
    case NULLPTR_TYPE:
      if (TREE_CODE (arg) != TREE_CODE (parm))
	return unify_type_mismatch (explain_p, parm, arg);

      /* We have already checked cv-qualification at the top of the
	 function.  */
      if (!same_type_ignoring_top_level_qualifiers_p (arg, parm))
	return unify_type_mismatch (explain_p, parm, arg);

      /* As far as unification is concerned, this wins.  Later checks
	 will invalidate it if necessary.  */
      return unify_success (explain_p);

      /* Types INTEGER_CST and MINUS_EXPR can come from array bounds.  */
      /* Type INTEGER_CST can come from ordinary constant template args.  */
    case INTEGER_CST:
      while (CONVERT_EXPR_P (arg))
	arg = TREE_OPERAND (arg, 0);

      if (TREE_CODE (arg) != INTEGER_CST)
	return unify_template_argument_mismatch (explain_p, parm, arg);
      return (tree_int_cst_equal (parm, arg)
	      ? unify_success (explain_p)
	      : unify_template_argument_mismatch (explain_p, parm, arg));

    case TREE_VEC:
      {
	int i, len, argslen;
	int parm_variadic_p = 0;

	if (TREE_CODE (arg) != TREE_VEC)
	  return unify_template_argument_mismatch (explain_p, parm, arg);

	len = TREE_VEC_LENGTH (parm);
	argslen = TREE_VEC_LENGTH (arg);

	/* Check for pack expansions in the parameters.  */
	for (i = 0; i < len; ++i)
	  {
	    if (PACK_EXPANSION_P (TREE_VEC_ELT (parm, i)))
	      {
		if (i == len - 1)
		  /* We can unify against something with a trailing
		     parameter pack.  */
		  parm_variadic_p = 1;
		else
		  /* [temp.deduct.type]/9: If the template argument list
		     of P contains a pack expansion that is not the last
		     template argument, the entire template argument list
		     is a non-deduced context.  */
		  return unify_success (explain_p);
	      }
	  }

	/* If we don't have enough arguments to satisfy the parameters
	   (not counting the pack expression at the end), or we have
	   too many arguments for a parameter list that doesn't end in
	   a pack expression, we can't unify.  */
	if (parm_variadic_p
	    ? argslen < len - parm_variadic_p
	    : argslen != len)
	  return unify_arity (explain_p, TREE_VEC_LENGTH (arg), len);

	/* Unify all of the parameters that precede the (optional)
	   pack expression.  */
	for (i = 0; i < len - parm_variadic_p; ++i)
	  {
	    RECUR_AND_CHECK_FAILURE (tparms, targs,
				     TREE_VEC_ELT (parm, i),
				     TREE_VEC_ELT (arg, i),
				     UNIFY_ALLOW_NONE, explain_p);
	  }
	if (parm_variadic_p)
	  return unify_pack_expansion (tparms, targs,
				       parm, arg, DEDUCE_EXACT,
				       /*subr=*/true, explain_p);
	return unify_success (explain_p);
      }

    case RECORD_TYPE:
    case UNION_TYPE:
      if (TREE_CODE (arg) != TREE_CODE (parm))
	return unify_type_mismatch (explain_p, parm, arg);

      if (TYPE_PTRMEMFUNC_P (parm))
	{
	  if (!TYPE_PTRMEMFUNC_P (arg))
	    return unify_type_mismatch (explain_p, parm, arg);

	  return unify (tparms, targs,
			TYPE_PTRMEMFUNC_FN_TYPE (parm),
			TYPE_PTRMEMFUNC_FN_TYPE (arg),
			strict, explain_p);
	}
      else if (TYPE_PTRMEMFUNC_P (arg))
	return unify_type_mismatch (explain_p, parm, arg);

      if (CLASSTYPE_TEMPLATE_INFO (parm))
	{
	  tree t = NULL_TREE;

	  if (strict_in & UNIFY_ALLOW_DERIVED)
	    {
	      /* First, we try to unify the PARM and ARG directly.  */
	      t = try_class_unification (tparms, targs,
					 parm, arg, explain_p);

	      if (!t)
		{
		  /* Fallback to the special case allowed in
		     [temp.deduct.call]:

		       If P is a class, and P has the form
		       template-id, then A can be a derived class of
		       the deduced A.  Likewise, if P is a pointer to
		       a class of the form template-id, A can be a
		       pointer to a derived class pointed to by the
		       deduced A.  */
		  enum template_base_result r;
		  r = get_template_base (tparms, targs, parm, arg,
					 explain_p, &t);

		  if (!t)
		    {
		      /* Don't give the derived diagnostic if we're
			 already dealing with the same template.  */
		      bool same_template
			= (CLASSTYPE_TEMPLATE_INFO (arg)
			   && (CLASSTYPE_TI_TEMPLATE (parm)
			       == CLASSTYPE_TI_TEMPLATE (arg)));
		      return unify_no_common_base (explain_p
						   && !same_template,
						   r, parm, arg);
		    }
		}
	    }
	  else if (CLASSTYPE_TEMPLATE_INFO (arg)
		   && (CLASSTYPE_TI_TEMPLATE (parm)
		       == CLASSTYPE_TI_TEMPLATE (arg)))
	    /* Perhaps PARM is something like S<U> and ARG is S<int>.
	       Then, we should unify `int' and `U'.  */
	    t = arg;
	  else
	    /* There's no chance of unification succeeding.  */
	    return unify_type_mismatch (explain_p, parm, arg);

	  return unify (tparms, targs, CLASSTYPE_TI_ARGS (parm),
			CLASSTYPE_TI_ARGS (t), UNIFY_ALLOW_NONE, explain_p);
	}
      else if (!same_type_ignoring_top_level_qualifiers_p (parm, arg))
	return unify_type_mismatch (explain_p, parm, arg);
      return unify_success (explain_p);

    case METHOD_TYPE:
    case FUNCTION_TYPE:
      {
	unsigned int nargs;
	tree *args;
	tree a;
	unsigned int i;

	if (TREE_CODE (arg) != TREE_CODE (parm))
	  return unify_type_mismatch (explain_p, parm, arg);

	/* CV qualifications for methods can never be deduced, they must
	   match exactly.  We need to check them explicitly here,
	   because type_unification_real treats them as any other
	   cv-qualified parameter.  */
	if (TREE_CODE (parm) == METHOD_TYPE
	    && (!check_cv_quals_for_unify
		(UNIFY_ALLOW_NONE,
		 class_of_this_parm (arg),
		 class_of_this_parm (parm))))
	  return unify_cv_qual_mismatch (explain_p, parm, arg);
	if (TREE_CODE (arg) == FUNCTION_TYPE
	    && type_memfn_quals (parm) != type_memfn_quals (arg))
	  return unify_cv_qual_mismatch (explain_p, parm, arg);
	if (type_memfn_rqual (parm) != type_memfn_rqual (arg))
	  return unify_type_mismatch (explain_p, parm, arg);

	RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm),
				 TREE_TYPE (arg), UNIFY_ALLOW_NONE,
				 explain_p);

	/* Collect ARG's parameter types into a flat array for
	   type_unification_real.  */
	nargs = list_length (TYPE_ARG_TYPES (arg));
	args = XALLOCAVEC (tree, nargs);
	for (a = TYPE_ARG_TYPES (arg), i = 0;
	     a != NULL_TREE && a != void_list_node;
	     a = TREE_CHAIN (a), ++i)
	  args[i] = TREE_VALUE (a);
	nargs = i;

	if (type_unification_real (tparms, targs, TYPE_ARG_TYPES (parm),
				   args, nargs, 1, DEDUCE_EXACT,
				   LOOKUP_NORMAL, NULL, explain_p))
	  return 1;

	if (flag_noexcept_type)
	  {
	    tree pspec = TYPE_RAISES_EXCEPTIONS (parm);
	    tree aspec = canonical_eh_spec (TYPE_RAISES_EXCEPTIONS (arg));
	    if (pspec == NULL_TREE)
	      pspec = noexcept_false_spec;
	    if (aspec == NULL_TREE)
	      aspec = noexcept_false_spec;
	    if (TREE_PURPOSE (pspec) && TREE_PURPOSE (aspec)
		&& uses_template_parms (TREE_PURPOSE (pspec)))
	      RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_PURPOSE (pspec),
				       TREE_PURPOSE (aspec),
				       UNIFY_ALLOW_NONE, explain_p);
	    else if (nothrow_spec_p (pspec) && !nothrow_spec_p (aspec))
	      return unify_type_mismatch (explain_p, parm, arg);
	  }

	return 0;
      }

    case OFFSET_TYPE:
      /* Unify a pointer to member with a pointer to member function,
	 which deduces the type of the member as a function type.  */
      if (TYPE_PTRMEMFUNC_P (arg))
	{
	  /* Check top-level cv qualifiers */
	  if (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, arg, parm))
	    return unify_cv_qual_mismatch (explain_p, parm, arg);

	  RECUR_AND_CHECK_FAILURE (tparms, targs,
				   TYPE_OFFSET_BASETYPE (parm),
				   TYPE_PTRMEMFUNC_OBJECT_TYPE (arg),
				   UNIFY_ALLOW_NONE, explain_p);

	  /* Determine the type of the function we are unifying
	     against.  */
	  tree fntype = static_fn_type (arg);

	  return unify (tparms, targs, TREE_TYPE (parm), fntype, strict,
			explain_p);
	}

      if (TREE_CODE (arg) != OFFSET_TYPE)
	return unify_type_mismatch (explain_p, parm, arg);
      RECUR_AND_CHECK_FAILURE (tparms, targs,
			       TYPE_OFFSET_BASETYPE (parm),
			       TYPE_OFFSET_BASETYPE (arg),
			       UNIFY_ALLOW_NONE, explain_p);
      return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
		    strict, explain_p);

    case CONST_DECL:
      if (DECL_TEMPLATE_PARM_P (parm))
	return unify (tparms, targs, DECL_INITIAL (parm), arg, strict,
		      explain_p);
      if (arg != scalar_constant_value (parm))
	return unify_template_argument_mismatch (explain_p, parm, arg);
      return unify_success (explain_p);

    case FIELD_DECL:
    case TEMPLATE_DECL:
      /* Matched cases are handled by the ARG == PARM test above.  */
      return unify_template_argument_mismatch (explain_p, parm, arg);

    case VAR_DECL:
      /* We might get a variable as a non-type template argument in parm
	 if the corresponding parameter is type-dependent.  Make any
	 necessary adjustments based on whether arg is a reference.  */
      if (CONSTANT_CLASS_P (arg))
	parm = fold_non_dependent_expr (parm);
      else if (REFERENCE_REF_P (arg))
	{
	  tree sub = TREE_OPERAND (arg, 0);
	  STRIP_NOPS (sub);
	  if (TREE_CODE (sub) == ADDR_EXPR)
	    arg = TREE_OPERAND (sub, 0);
	}
      /* Now use the normal expression code to check whether they match.  */
      goto expr;

    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      return unify (tparms, targs, ARGUMENT_PACK_ARGS (parm),
		    ARGUMENT_PACK_ARGS (arg), strict, explain_p);

    case TYPEOF_TYPE:
    case DECLTYPE_TYPE:
    case UNDERLYING_TYPE:
      /* Cannot deduce anything from TYPEOF_TYPE, DECLTYPE_TYPE,
	 or UNDERLYING_TYPE nodes.  */
      return unify_success (explain_p);

    case ERROR_MARK:
      /* Unification fails if we hit an error node.  */
      return unify_invalid (explain_p);

    case INDIRECT_REF:
      if (REFERENCE_REF_P (parm))
	{
	  bool pexp = PACK_EXPANSION_P (arg);
	  if (pexp)
	    arg = PACK_EXPANSION_PATTERN (arg);
	  if (REFERENCE_REF_P (arg))
	    arg = TREE_OPERAND (arg, 0);
	  if (pexp)
	    arg = make_pack_expansion (arg, complain);
	  return unify (tparms, targs, TREE_OPERAND (parm, 0), arg,
			strict, explain_p);
	}
      /* FALLTHRU */

    default:
      /* An unresolved overload is a nondeduced context.  */
      if (is_overloaded_fn (parm) || type_unknown_p (parm))
	return unify_success (explain_p);
      gcc_assert (EXPR_P (parm) || TREE_CODE (parm) == TRAIT_EXPR);
    expr:
      /* We must be looking at an expression.  This can happen with
	 something like:

	   template <int I>
	   void foo(S<I>, S<I + 2>);

	 This is a "nondeduced context":

	   [deduct.type]

	   The nondeduced contexts are:

	   --A type that is a template-id in which one or more of
	     the template-arguments is an expression that references
	     a template-parameter.

	 In these cases, we assume deduction succeeded, but don't
	 actually infer any unifications.  */

      if (!uses_template_parms (parm)
	  && !template_args_equal (parm, arg))
	return unify_expression_unequal (explain_p, parm, arg);
      else
	return unify_success (explain_p);
    }
}
#undef RECUR_AND_CHECK_FAILURE

/* Note that DECL can be defined in this translation unit, if
   required.
*/

/* Mark DECL (and all of its clones, e.g. constructor/destructor
   variants) as definable in this translation unit: clear the
   "not really extern" bit so the definition can be emitted here.  */

static void
mark_definable (tree decl)
{
  tree clone;
  DECL_NOT_REALLY_EXTERN (decl) = 1;
  /* Clones must stay in sync with the primary declaration.  */
  FOR_EACH_CLONE (clone, decl)
    DECL_NOT_REALLY_EXTERN (clone) = 1;
}

/* Called if RESULT is explicitly instantiated, or is a member of an
   explicitly instantiated class.  EXTERN_P is nonzero for an "extern"
   explicit instantiation, in which case RESULT is declared but must
   not be emitted here.  */

void
mark_decl_instantiated (tree result, int extern_p)
{
  SET_DECL_EXPLICIT_INSTANTIATION (result);

  /* If this entity has already been written out, it's too late to
     make any modifications.  */
  if (TREE_ASM_WRITTEN (result))
    return;

  /* For anonymous namespace we don't need to do anything.  */
  if (decl_anon_ns_mem_p (result))
    {
      gcc_assert (!TREE_PUBLIC (result));
      return;
    }

  if (TREE_CODE (result) != FUNCTION_DECL)
    /* The TREE_PUBLIC flag for function declarations will have been
       set correctly by tsubst.  */
    TREE_PUBLIC (result) = 1;

  /* This might have been set by an earlier implicit instantiation.  */
  DECL_COMDAT (result) = 0;

  if (extern_p)
    DECL_NOT_REALLY_EXTERN (result) = 0;
  else
    {
      mark_definable (result);
      mark_needed (result);
      /* Always make artificials weak.  */
      if (DECL_ARTIFICIAL (result) && flag_weak)
	comdat_linkage (result);
      /* For WIN32 we also want to put explicit instantiations in
	 linkonce sections.  */
      else if (TREE_PUBLIC (result))
	maybe_make_one_only (result);
    }

  /* If EXTERN_P, then this function will not be emitted -- unless
     followed by an explicit instantiation, at which point its linkage
     will be adjusted.  If !EXTERN_P, then this function will be
     emitted here.  In neither circumstance do we want
     import_export_decl to adjust the linkage.  */
  DECL_INTERFACE_KNOWN (result) = 1;
}

/* Subroutine of more_specialized_fn: check whether TARGS is missing
   any important template arguments.  If any are missing, we check
   whether they're important by using error_mark_node for substituting
   into any args that were used for partial ordering (the ones between
   ARGS and END) and seeing if it bubbles up.  */

static bool
check_undeduced_parms (tree targs, tree args, tree end)
{
  bool found = false;
  int i;
  /* Fill every undeduced slot with error_mark_node; the trial
     substitution below then fails iff one of those slots is actually
     used by the partial-ordering argument types.  */
  for (i = TREE_VEC_LENGTH (targs) - 1; i >= 0; --i)
    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
      {
	found = true;
	TREE_VEC_ELT (targs, i) = error_mark_node;
      }
  if (found)
    {
      tree substed = tsubst_arg_types (args, targs, end, tf_none, NULL_TREE);
      if (substed == error_mark_node)
	return true;
    }
  return false;
}

/* Given two function templates PAT1 and PAT2, return:

   1 if PAT1 is more specialized than PAT2 as described in [temp.func.order].
   -1 if PAT2 is more specialized than PAT1.
   0 if neither is more specialized.

   LEN indicates the number of parameters we should consider
   (defaulted parameters should not be considered).

   The 1998 std underspecified function template partial ordering, and
   DR214 addresses the issue.  We take pairs of arguments, one from
   each of the templates, and deduce them against each other.  One of
   the templates will be more specialized if all the *other*
   template's arguments deduce against its arguments and at least one
   of its arguments *does* *not* deduce against the other template's
   corresponding argument.  Deduction is done as for class templates.
   The arguments used in deduction have reference and top level cv
   qualifiers removed.  Iff both arguments were originally reference
   types *and* deduction succeeds in both directions, an lvalue reference
   wins against an rvalue reference and otherwise the template
   with the more cv-qualified argument wins for that pairing (if
   neither is more cv-qualified, they both are equal).  Unlike regular
   deduction, after all the arguments have been deduced in this way,
   we do *not* verify the deduced template argument values can be
   substituted into non-deduced contexts.

   The logic can be a bit confusing here, because we look at deduce1 and
   targs1 to see if pat2 is at least as specialized, and vice versa; if we
   can find template arguments for pat1 to make arg1 look like arg2, that
   means that arg2 is at least as specialized as arg1.
*/

int
more_specialized_fn (tree pat1, tree pat2, int len)
{
  tree decl1 = DECL_TEMPLATE_RESULT (pat1);
  tree decl2 = DECL_TEMPLATE_RESULT (pat2);
  tree targs1 = make_tree_vec (DECL_NTPARMS (pat1));
  tree targs2 = make_tree_vec (DECL_NTPARMS (pat2));
  tree tparms1 = DECL_INNERMOST_TEMPLATE_PARMS (pat1);
  tree tparms2 = DECL_INNERMOST_TEMPLATE_PARMS (pat2);
  tree args1 = TYPE_ARG_TYPES (TREE_TYPE (decl1));
  tree args2 = TYPE_ARG_TYPES (TREE_TYPE (decl2));
  tree origs1, origs2;
  /* loseN means "patN is NOT more specialized than the other".  */
  bool lose1 = false;
  bool lose2 = false;

  /* Remove the this parameter from non-static member functions.  If
     one is a non-static member function and the other is not a static
     member function, remove the first parameter from that function
     also.  This situation occurs for operator functions where we
     locate both a member function (with this pointer) and non-member
     operator (with explicit first operand).  */
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1))
    {
      len--; /* LEN is the number of significant arguments for DECL1 */
      args1 = TREE_CHAIN (args1);
      if (!DECL_STATIC_FUNCTION_P (decl2))
	args2 = TREE_CHAIN (args2);
    }
  else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
    {
      args2 = TREE_CHAIN (args2);
      if (!DECL_STATIC_FUNCTION_P (decl1))
	{
	  len--;
	  args1 = TREE_CHAIN (args1);
	}
    }

  /* If only one is a conversion operator, they are unordered.  */
  if (DECL_CONV_FN_P (decl1) != DECL_CONV_FN_P (decl2))
    return 0;

  /* Consider the return type for a conversion function */
  if (DECL_CONV_FN_P (decl1))
    {
      args1 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl1)), args1);
      args2 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl2)), args2);
      len++;
    }

  processing_template_decl++;

  origs1 = args1;
  origs2 = args2;

  while (len--
	 /* Stop when an ellipsis is seen.  */
	 && args1 != NULL_TREE && args2 != NULL_TREE)
    {
      tree arg1 = TREE_VALUE (args1);
      tree arg2 = TREE_VALUE (args2);
      int deduce1, deduce2;
      int quals1 = -1;
      int quals2 = -1;
      /* refN encodes reference-ness of the original parameter type:
	 0 = not a reference, 1 = lvalue reference, 2 = rvalue
	 reference.  */
      int ref1 = 0;
      int ref2 = 0;

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  && TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  /* When both arguments are pack expansions, we need only
	     unify the patterns themselves.  */
	  arg1 = PACK_EXPANSION_PATTERN (arg1);
	  arg2 = PACK_EXPANSION_PATTERN (arg2);

	  /* This is the last comparison we need to do.  */
	  len = 0;
	}

      /* DR 1847: If a particular P contains no template-parameters that
	 participate in template argument deduction, that P is not used to
	 determine the ordering.  */
      if (!uses_deducible_template_parms (arg1)
	  && !uses_deducible_template_parms (arg2))
	goto next;

      if (TREE_CODE (arg1) == REFERENCE_TYPE)
	{
	  ref1 = TYPE_REF_IS_RVALUE (arg1) + 1;
	  arg1 = TREE_TYPE (arg1);
	  quals1 = cp_type_quals (arg1);
	}

      if (TREE_CODE (arg2) == REFERENCE_TYPE)
	{
	  ref2 = TYPE_REF_IS_RVALUE (arg2) + 1;
	  arg2 = TREE_TYPE (arg2);
	  quals2 = cp_type_quals (arg2);
	}

      arg1 = TYPE_MAIN_VARIANT (arg1);
      arg2 = TYPE_MAIN_VARIANT (arg2);

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION)
	{
	  int i, len2 = remaining_arguments (args2);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len2);
	  tree ta = args2;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg1;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len2; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce1 = (unify_pack_expansion (tparms1, targs1, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG1 is
	     a pack expansion but ARG2 is not.  */
	  deduce2 = 0;
	}
      else if (TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  int i, len1 = remaining_arguments (args1);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len1);
	  tree ta = args1;

	  /* Setup the parameter vector, which contains only ARG2.  */
	  TREE_VEC_ELT (parmvec, 0) = arg2;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len1; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce2 = (unify_pack_expansion (tparms2, targs2, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG2 is
	     a pack expansion but ARG1 is not.  */
	  deduce1 = 0;
	}
      else
	{
	  /* The normal case, where neither argument is a pack
	     expansion.  */
	  deduce1 = (unify (tparms1, targs1, arg1, arg2,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false)
		     == 0);
	  deduce2 = (unify (tparms2, targs2, arg2, arg1,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false)
		     == 0);
	}

      /* If we couldn't deduce arguments for tparms1 to make arg1 match
	 arg2, then arg2 is not as specialized as arg1.  */
      if (!deduce1)
	lose2 = true;
      if (!deduce2)
	lose1 = true;

      /* "If, for a given type, deduction succeeds in both directions
	 (i.e., the types are identical after the transformations above)
	 and both P and A were reference types (before being replaced with
	 the type referred to above):
	 - if the type from the argument template was an lvalue reference
	 and the type from the parameter template was not, the argument
	 type is considered to be more specialized than the other;
	 otherwise,
	 - if the type from the argument template is more cv-qualified
	 than the type from the parameter template (as described above),
	 the argument type is considered to be more specialized than
	 the other; otherwise,
	 - neither type is more specialized than the other."  */

      if (deduce1 && deduce2)
	{
	  if (ref1 && ref2 && ref1 != ref2)
	    {
	      /* An lvalue reference (refN == 1) beats an rvalue
		 reference (refN == 2).  */
	      if (ref1 > ref2)
		lose1 = true;
	      else
		lose2 = true;
	    }
	  else if (quals1 != quals2 && quals1 >= 0 && quals2 >= 0)
	    {
	      /* The strictly more cv-qualified side wins this pair.  */
	      if ((quals1 & quals2) == quals2)
		lose2 = true;
	      if ((quals1 & quals2) == quals1)
		lose1 = true;
	    }
	}

      if (lose1 && lose2)
	/* We've failed to deduce something in either direction.
	   These must be unordered.  */
	break;

    next:

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  || TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	/* We have already processed all of the arguments in our
	   handing of the pack expansion type.  */
	len = 0;

      args1 = TREE_CHAIN (args1);
      args2 = TREE_CHAIN (args2);
    }

  /* "In most cases, all template parameters must have values in order for
     deduction to succeed, but for partial ordering purposes a template
     parameter may remain without a value provided it is not used in the
     types being used for partial ordering."

     Thus, if we are missing any of the targs1 we need to substitute into
     origs1, then pat2 is not as specialized as pat1.  This can happen when
     there is a nondeduced context.  */
  if (!lose2 && check_undeduced_parms (targs1, origs1, args1))
    lose2 = true;
  if (!lose1 && check_undeduced_parms (targs2, origs2, args2))
    lose1 = true;

  processing_template_decl--;

  /* If both deductions succeed, the partial ordering selects the
     more constrained template.  */
  if (!lose1 && !lose2)
    {
      tree c1 = get_constraints (DECL_TEMPLATE_RESULT (pat1));
      tree c2 = get_constraints (DECL_TEMPLATE_RESULT (pat2));
      lose1 = !subsumes_constraints (c1, c2);
      lose2 = !subsumes_constraints (c2, c1);
    }

  /* All things being equal, if the next argument is a pack expansion
     for one function but not for the other, prefer the
     non-variadic function.  FIXME this is bogus; see c++/41958.  */
  if (lose1 == lose2
      && args1 && TREE_VALUE (args1)
      && args2 && TREE_VALUE (args2))
    {
      lose1 = TREE_CODE (TREE_VALUE (args1)) == TYPE_PACK_EXPANSION;
      lose2 = TREE_CODE (TREE_VALUE (args2)) == TYPE_PACK_EXPANSION;
    }

  if (lose1 == lose2)
    return 0;
  else if (!lose1)
    return 1;
  else
    return -1;
}

/* Determine which of two partial specializations of TMPL is more
   specialized.

   PAT1 is a TREE_LIST whose TREE_VALUE is the TEMPLATE_DECL corresponding
   to the first partial specialization.  The TREE_PURPOSE is the
   innermost set of template parameters for the partial
   specialization.  PAT2 is similar, but for the second template.

   Return 1 if the first partial specialization is more specialized;
   -1 if the second is more specialized; 0 if neither is more
   specialized.

   See [temp.class.order] for information about determining which of
   two templates is more specialized.  */

static int
more_specialized_partial_spec (tree tmpl, tree pat1, tree pat2)
{
  tree targs;
  int winner = 0;
  bool any_deductions = false;

  tree tmpl1 = TREE_VALUE (pat1);
  tree tmpl2 = TREE_VALUE (pat2);
  tree specargs1 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl1)));
  tree specargs2 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl2)));

  /* Just like what happens for functions, if we are ordering between
     different template specializations, we may encounter dependent
     types in the arguments, and we need our dependency check
     functions to behave correctly.  */
  ++processing_template_decl;
  targs = get_partial_spec_bindings (tmpl, tmpl1, specargs2);
  if (targs)
    {
      --winner;
      any_deductions = true;
    }

  targs = get_partial_spec_bindings (tmpl, tmpl2, specargs1);
  if (targs)
    {
      ++winner;
      any_deductions = true;
    }
  --processing_template_decl;

  /* If both deductions succeed, the partial ordering selects the
     more constrained template.
*/
  if (!winner && any_deductions)
    return more_constrained (tmpl1, tmpl2);

  /* In the case of a tie where at least one of the templates
     has a parameter pack at the end, the template with the most
     non-packed parameters wins.  */
  if (winner == 0
      && any_deductions
      && (template_args_variadic_p (TREE_PURPOSE (pat1))
	  || template_args_variadic_p (TREE_PURPOSE (pat2))))
    {
      tree args1 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat1));
      tree args2 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat2));
      int len1 = TREE_VEC_LENGTH (args1);
      int len2 = TREE_VEC_LENGTH (args2);

      /* We don't count the pack expansion at the end.  */
      if (template_args_variadic_p (TREE_PURPOSE (pat1)))
	--len1;
      if (template_args_variadic_p (TREE_PURPOSE (pat2)))
	--len2;

      if (len1 > len2)
	return 1;
      else if (len1 < len2)
	return -1;
    }

  return winner;
}

/* Return the template arguments that will produce the function signature
   DECL from the function template FN, with the explicit template
   arguments EXPLICIT_ARGS.  If CHECK_RETTYPE is true, the return type must
   also match.  Return NULL_TREE if no satisfactory arguments could be
   found.  */

static tree
get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype)
{
  int ntparms = DECL_NTPARMS (fn);
  tree targs = make_tree_vec (ntparms);
  tree decl_type = TREE_TYPE (decl);
  tree decl_arg_types;
  tree *args;
  unsigned int nargs, ix;
  tree arg;

  gcc_assert (decl != DECL_TEMPLATE_RESULT (fn));

  /* Never do unification on the 'this' parameter.  */
  decl_arg_types = skip_artificial_parms_for (decl,
					      TYPE_ARG_TYPES (decl_type));

  nargs = list_length (decl_arg_types);
  args = XALLOCAVEC (tree, nargs);
  for (arg = decl_arg_types, ix = 0;
       arg != NULL_TREE && arg != void_list_node;
       arg = TREE_CHAIN (arg), ++ix)
    args[ix] = TREE_VALUE (arg);

  /* For conversion functions the return type always participates in
     deduction, regardless of CHECK_RETTYPE.  */
  if (fn_type_unification (fn, explicit_args, targs,
			   args, ix,
			   (check_rettype || DECL_CONV_FN_P (fn)
			    ? TREE_TYPE (decl_type) : NULL_TREE),
			   DEDUCE_EXACT, LOOKUP_NORMAL, /*explain_p=*/false,
			   /*decltype*/false)
      == error_mark_node)
    return NULL_TREE;

  return targs;
}

/* Return the innermost template arguments that, when applied to a partial
   specialization SPEC_TMPL of TMPL, yield the ARGS.

   For example, suppose we have:

     template <class T, class U> struct S {};
     template <class T> struct S<T*, int> {};

   Then, suppose we want to get `S<double*, int>'.  SPEC_TMPL will be the
   partial specialization and the ARGS will be {double*, int}.  The resulting
   vector will be {double}, indicating that `T' is bound to `double'.  */

static tree
get_partial_spec_bindings (tree tmpl, tree spec_tmpl, tree args)
{
  tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (spec_tmpl);
  tree spec_args = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (spec_tmpl)));
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  tree deduced_args;
  tree innermost_deduced_args;

  innermost_deduced_args = make_tree_vec (ntparms);
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      /* Share the outer levels of ARGS; only the innermost level is
	 being deduced here.  */
      deduced_args = copy_node (args);
      SET_TMPL_ARGS_LEVEL (deduced_args,
			   TMPL_ARGS_DEPTH (deduced_args),
			   innermost_deduced_args);
    }
  else
    deduced_args = innermost_deduced_args;

  bool tried_array_deduction = (cxx_dialect < cxx17);
 again:
  if (unify (tparms, deduced_args,
	     INNERMOST_TEMPLATE_ARGS (spec_args),
	     INNERMOST_TEMPLATE_ARGS (args),
	     UNIFY_ALLOW_NONE, /*explain_p=*/false))
    return NULL_TREE;

  /* All of the template parameters of the partial specialization must
     have been deduced; otherwise this is not a match.  */
  for (i = 0; i < ntparms; ++i)
    if (! TREE_VEC_ELT (innermost_deduced_args, i))
      {
	if (!tried_array_deduction)
	  {
	    try_array_deduction (tparms, innermost_deduced_args,
				 INNERMOST_TEMPLATE_ARGS (spec_args));
	    tried_array_deduction = true;
	    if (TREE_VEC_ELT (innermost_deduced_args, i))
	      goto again;
	  }
	return NULL_TREE;
      }

  if (!push_tinst_level (spec_tmpl, deduced_args))
    {
      excessive_deduction_depth = true;
      return NULL_TREE;
    }

  /* Verify that nondeduced template arguments agree with the type
     obtained from argument deduction.
     For example:

       struct A { typedef int X; };
       template <class T, class U> struct C {};
       template <class T> struct C<T, typename T::X> {};

     Then with the instantiation `C<A, int>', we can deduce that
     `T' is `A' but unify () does not check whether `typename T::X'
     is `int'.  */
  spec_args = tsubst (spec_args, deduced_args, tf_none, NULL_TREE);
  if (spec_args != error_mark_node)
    spec_args = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tmpl),
				       INNERMOST_TEMPLATE_ARGS (spec_args),
				       tmpl, tf_none, false, false);

  pop_tinst_level ();

  if (spec_args == error_mark_node
      /* We only need to check the innermost arguments; the other
	 arguments will always agree.  */
      || !comp_template_args_porder (INNERMOST_TEMPLATE_ARGS (spec_args),
				     INNERMOST_TEMPLATE_ARGS (args)))
    return NULL_TREE;

  /* Now that we have bindings for all of the template arguments,
     ensure that the arguments deduced for the template template
     parameters have compatible template parameter lists.  See the use
     of template_template_parm_bindings_ok_p in fn_type_unification
     for more information.  */
  if (!template_template_parm_bindings_ok_p (tparms, deduced_args))
    return NULL_TREE;

  return deduced_args;
}

// Compare two function templates T1 and T2 by deducing bindings
// from one against the other.  If both deductions succeed, compare
// constraints to see which is more constrained.
// Returns 1 if T1 is more specialized, -1 if T2 is, 0 if neither.
static int
more_specialized_inst (tree t1, tree t2)
{
  int fate = 0;
  int count = 0;

  if (get_bindings (t1, DECL_TEMPLATE_RESULT (t2), NULL_TREE, true))
    {
      --fate;
      ++count;
    }

  if (get_bindings (t2, DECL_TEMPLATE_RESULT (t1), NULL_TREE, true))
    {
      ++fate;
      ++count;
    }

  // If both deductions succeed, then one may be more constrained.
  if (count == 2 && fate == 0)
    fate = more_constrained (t1, t2);

  return fate;
}

/* TEMPLATES is a TREE_LIST.  Each TREE_VALUE is a TEMPLATE_DECL.
   Return the TREE_LIST node with the most specialized template, if
   any.  If there is no most specialized template, the error_mark_node
   is returned.

   Note that this function does not look at, or modify, the
   TREE_PURPOSE or TREE_TYPE of any of the nodes.  Since the node
   returned is one of the elements of INSTANTIATIONS, callers may
   store information in the TREE_PURPOSE or TREE_TYPE of the nodes,
   and retrieve it from the value returned.  */

tree
most_specialized_instantiation (tree templates)
{
  tree fn, champ;

  ++processing_template_decl;

  /* Standard tournament: scan once keeping the current best, then
     verify the winner against everything seen before it.  */
  champ = templates;
  for (fn = TREE_CHAIN (templates); fn; fn = TREE_CHAIN (fn))
    {
      gcc_assert (TREE_VALUE (champ) != TREE_VALUE (fn));
      int fate = more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn));
      if (fate == -1)
	champ = fn;
      else if (!fate)
	{
	  /* Equally specialized, move to next function.  If there
	     is no next function, nothing's most specialized.  */
	  fn = TREE_CHAIN (fn);
	  champ = fn;
	  if (!fn)
	    break;
	}
    }

  if (champ)
    /* Now verify that champ is better than everything earlier in the
       instantiation list.  */
    for (fn = templates; fn != champ; fn = TREE_CHAIN (fn))
      {
	if (more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn)) != 1)
	  {
	    champ = NULL_TREE;
	    break;
	  }
      }

  processing_template_decl--;

  if (!champ)
    return error_mark_node;

  return champ;
}

/* If DECL is a specialization of some template, return the most
   general such template.  Otherwise, returns NULL_TREE.

   For example, given:

     template <class T> struct S { template <class U> void f(U); };

   if TMPL is `template <class U> void S<int>::f(U)' this will return
   the full template.  This function will not trace past partial
   specializations, however.  For example, given in addition:

     template <class T> struct S<T*> { template <class U> void f(U); };

   if TMPL is `template <class U> void S<int*>::f(U)' this will return
   `template <class T> template <class U> S<T*>::f(U)'.  */

tree
most_general_template (tree decl)
{
  if (TREE_CODE (decl) != TEMPLATE_DECL)
    {
      if (tree tinfo = get_template_info (decl))
	decl = TI_TEMPLATE (tinfo);
      /* The TI_TEMPLATE can be an IDENTIFIER_NODE for a
	 template friend, or a FIELD_DECL for a capture pack.  */
      if (TREE_CODE (decl) != TEMPLATE_DECL)
	return NULL_TREE;
    }

  /* Look for more and more general templates.  */
  while (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl))
    {
      /* The DECL_TI_TEMPLATE can be an IDENTIFIER_NODE in some cases.
	 (See cp-tree.h for details.)  */
      if (TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL)
	break;

      if (CLASS_TYPE_P (TREE_TYPE (decl))
	  && !TYPE_DECL_ALIAS_P (TYPE_NAME (TREE_TYPE (decl)))
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
	break;

      /* Stop if we run into an explicitly specialized class template.  */
      if (!DECL_NAMESPACE_SCOPE_P (decl)
	  && DECL_CONTEXT (decl)
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (decl)))
	break;

      decl = DECL_TI_TEMPLATE (decl);
    }

  return decl;
}

/* Return the most specialized of the template partial specializations
   which can produce TARGET, a specialization of some class or variable
   template.  The value returned is actually a TREE_LIST; the TREE_VALUE is
   a TEMPLATE_DECL node corresponding to the partial specialization, while
   the TREE_PURPOSE is the set of template arguments that must be
   substituted into the template pattern in order to generate TARGET.

   If the choice of partial specialization is ambiguous, a diagnostic
   is issued, and the error_mark_node is returned.  If there are no
   partial specializations matching TARGET, then NULL_TREE is
   returned, indicating that the primary template should be used.
*/

static tree
most_specialized_partial_spec (tree target, tsubst_flags_t complain)
{
  tree list = NULL_TREE;
  tree t;
  tree champ;
  int fate;
  bool ambiguous_p;
  tree outer_args = NULL_TREE;
  tree tmpl, args;

  /* Extract the template and its arguments from TARGET, which may be
     a class type, a TEMPLATE_ID_EXPR, or a variable.  */
  if (TYPE_P (target))
    {
      tree tinfo = CLASSTYPE_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else if (TREE_CODE (target) == TEMPLATE_ID_EXPR)
    {
      tmpl = TREE_OPERAND (target, 0);
      args = TREE_OPERAND (target, 1);
    }
  else if (VAR_P (target))
    {
      tree tinfo = DECL_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else
    gcc_unreachable ();

  tree main_tmpl = most_general_template (tmpl);

  /* For determining which partial specialization to use, only the
     innermost args are interesting.  */
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      outer_args = strip_innermost_template_args (args, 1);
      args = INNERMOST_TEMPLATE_ARGS (args);
    }

  /* Build LIST, the set of partial specializations that match ARGS.  */
  for (t = DECL_TEMPLATE_SPECIALIZATIONS (main_tmpl); t; t = TREE_CHAIN (t))
    {
      tree spec_args;
      tree spec_tmpl = TREE_VALUE (t);

      if (outer_args)
	{
	  /* Substitute in the template args from the enclosing class.  */
	  ++processing_template_decl;
	  spec_tmpl = tsubst (spec_tmpl, outer_args, tf_none, NULL_TREE);
	  --processing_template_decl;
	}

      if (spec_tmpl == error_mark_node)
	return error_mark_node;

      spec_args = get_partial_spec_bindings (tmpl, spec_tmpl, args);
      if (spec_args)
	{
	  if (outer_args)
	    spec_args = add_to_template_args (outer_args, spec_args);

	  /* Keep the candidate only if the constraints are satisfied,
	     or if we're not compiling with concepts.  */
	  if (!flag_concepts
	      || constraints_satisfied_p (spec_tmpl, spec_args))
	    {
	      list = tree_cons (spec_args, TREE_VALUE (t), list);
	      TREE_TYPE (list) = TREE_TYPE (t);
	    }
	}
    }

  if (! list)
    return NULL_TREE;

  /* Tournament over LIST: find a candidate CHAMP not beaten by any
     later element, then verify it against the earlier ones.  */
  ambiguous_p = false;
  t = list;
  champ = t;
  t = TREE_CHAIN (t);
  for (; t; t = TREE_CHAIN (t))
    {
      fate = more_specialized_partial_spec (tmpl, champ, t);
      if (fate == 1)
	;
      else
	{
	  if (fate == 0)
	    {
	      t = TREE_CHAIN (t);
	      if (! t)
		{
		  ambiguous_p = true;
		  break;
		}
	    }
	  champ = t;
	}
    }

  if (!ambiguous_p)
    for (t = list; t && t != champ; t = TREE_CHAIN (t))
      {
	fate = more_specialized_partial_spec (tmpl, champ, t);
	if (fate != 1)
	  {
	    ambiguous_p = true;
	    break;
	  }
      }

  if (ambiguous_p)
    {
      const char *str;
      char *spaces = NULL;
      if (!(complain & tf_error))
	return error_mark_node;
      if (TYPE_P (target))
	error ("ambiguous template instantiation for %q#T", target);
      else
	error ("ambiguous template instantiation for %q#D", target);
      str = ngettext ("candidate is:", "candidates are:", list_length (list));
      for (t = list; t; t = TREE_CHAIN (t))
	{
	  tree subst = build_tree_list (TREE_VALUE (t), TREE_PURPOSE (t));
	  inform (DECL_SOURCE_LOCATION (TREE_VALUE (t)),
		  "%s %#qS", spaces ? spaces : str, subst);
	  spaces = spaces ? spaces : get_spaces (str);
	}
      free (spaces);
      return error_mark_node;
    }

  return champ;
}

/* Explicitly instantiate DECL.  STORAGE, if non-null, is the
   storage-class specifier (e.g. `extern') applied to the
   instantiation.  */

void
do_decl_instantiation (tree decl, tree storage)
{
  tree result = NULL_TREE;
  int extern_p = 0;

  if (!decl || decl == error_mark_node)
    /* An error occurred, for which grokdeclarator has already issued
       an appropriate message.  */
    return;
  else if (! DECL_LANG_SPECIFIC (decl))
    {
      error ("explicit instantiation of non-template %q#D", decl);
      return;
    }

  bool var_templ = (DECL_TEMPLATE_INFO (decl)
		    && variable_template_p (DECL_TI_TEMPLATE (decl)));

  if (VAR_P (decl) && !var_templ)
    {
      /* There is an asymmetry here in the way VAR_DECLs and
	 FUNCTION_DECLs are handled by grokdeclarator.  In the case of
	 the latter, the DECL we get back will be marked as a
	 template instantiation, and the appropriate
	 DECL_TEMPLATE_INFO will be set up.  This does not happen for
	 VAR_DECLs so we do the lookup here.  Probably, grokdeclarator
	 should handle VAR_DECLs as it currently handles
	 FUNCTION_DECLs.
*/
      if (!DECL_CLASS_SCOPE_P (decl))
	{
	  error ("%qD is not a static data member of a class template", decl);
	  return;
	}
      result = lookup_field (DECL_CONTEXT (decl), DECL_NAME (decl), 0, false);
      if (!result || !VAR_P (result))
	{
	  error ("no matching template for %qD found", decl);
	  return;
	}
      if (!same_type_p (TREE_TYPE (result), TREE_TYPE (decl)))
	{
	  error ("type %qT for explicit instantiation %qD does not match "
		 "declared type %qT", TREE_TYPE (result), decl,
		 TREE_TYPE (decl));
	  return;
	}
    }
  else if (TREE_CODE (decl) != FUNCTION_DECL && !var_templ)
    {
      error ("explicit instantiation of %q#D", decl);
      return;
    }
  else
    result = decl;

  /* Check for various error cases.  Note that if the explicit
     instantiation is valid the RESULT will currently be marked as an
     *implicit* instantiation; DECL_EXPLICIT_INSTANTIATION is not set
     until we get here.  */

  if (DECL_TEMPLATE_SPECIALIZATION (result))
    {
      /* DR 259 [temp.spec].

	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the explicit
	 instantiation follows a declaration of the explicit
	 specialization.

	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect.  */
      return;
    }
  else if (DECL_EXPLICIT_INSTANTIATION (result))
    {
      /* [temp.spec]

	 No program shall explicitly instantiate any template more
	 than once.

	 We check DECL_NOT_REALLY_EXTERN so as not to complain when
	 the first instantiation was `extern' and the second is not,
	 and EXTERN_P for the opposite case.  */
      if (DECL_NOT_REALLY_EXTERN (result) && !extern_p)
	permerror (input_location,
		   "duplicate explicit instantiation of %q#D", result);
      /* If an "extern" explicit instantiation follows an ordinary
	 explicit instantiation, the template is instantiated.  */
      if (extern_p)
	return;
    }
  else if (!DECL_IMPLICIT_INSTANTIATION (result))
    {
      error ("no matching template for %qD found", result);
      return;
    }
  else if (!DECL_TEMPLATE_INFO (result))
    {
      permerror (input_location,
		 "explicit instantiation of non-template %q#D", result);
      return;
    }

  if (storage == NULL_TREE)
    ;
  else if (storage == ridpointers[(int) RID_EXTERN])
    {
      if (!in_system_header_at (input_location) && (cxx_dialect == cxx98))
	pedwarn (input_location, OPT_Wpedantic,
		 "ISO C++ 1998 forbids the use of %<extern%> on explicit "
		 "instantiations");
      extern_p = 1;
    }
  else
    error ("storage class %qD applied to template instantiation", storage);

  check_explicit_instantiation_namespace (result);
  mark_decl_instantiated (result, extern_p);
  if (! extern_p)
    instantiate_decl (result, /*defer_ok=*/true,
		      /*expl_inst_class_mem_p=*/false);
}

/* Mark class type T as explicitly instantiated.  EXTERN_P is nonzero
   for an `extern template' (interface-only) instantiation, in which
   case no code is emitted for T here.  */

static void
mark_class_instantiated (tree t, int extern_p)
{
  SET_CLASSTYPE_EXPLICIT_INSTANTIATION (t);
  SET_CLASSTYPE_INTERFACE_KNOWN (t);
  CLASSTYPE_INTERFACE_ONLY (t) = extern_p;
  TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = extern_p;
  if (! extern_p)
    {
      CLASSTYPE_DEBUG_REQUESTED (t) = 1;
      rest_of_type_compilation (t, 1);
    }
}

/* Called from do_type_instantiation through binding_table_foreach to
   do recursive instantiation for the type bound in ENTRY.  */

static void
bt_instantiate_type_proc (binding_entry entry, void *data)
{
  tree storage = *(tree *) data;

  if (MAYBE_CLASS_TYPE_P (entry->type)
      && CLASSTYPE_TEMPLATE_INFO (entry->type)
      && !uses_template_parms (CLASSTYPE_TI_ARGS (entry->type)))
    do_type_instantiation (TYPE_MAIN_DECL (entry->type), storage, 0);
}

/* Perform an explicit instantiation of template class T.  STORAGE, if
   non-null, is the RID for extern, inline or static.  COMPLAIN is
   nonzero if this is called from the parser, zero if called recursively,
   since the standard is unclear (as detailed below).
 */

void
do_type_instantiation (tree t, tree storage, tsubst_flags_t complain)
{
  int extern_p = 0;
  int nomem_p = 0;
  int static_p = 0;
  int previous_instantiation_extern_p = 0;

  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  /* Reject explicit instantiation of anything that is not a class
     template specialization.  */
  if (! CLASS_TYPE_P (t) || ! CLASSTYPE_TEMPLATE_INFO (t))
    {
      tree tmpl = (TYPE_TEMPLATE_INFO (t)) ? TYPE_TI_TEMPLATE (t) : NULL;
      if (tmpl)
	error ("explicit instantiation of non-class template %qD", tmpl);
      else
	error ("explicit instantiation of non-template type %qT", t);
      return;
    }

  complete_type (t);

  if (!COMPLETE_TYPE_P (t))
    {
      if (complain & tf_error)
	error ("explicit instantiation of %q#T before definition of template",
	       t);
      return;
    }

  /* Diagnose any storage-class-specifier: only "extern" has standard
     meaning (C++11), and even that is pedwarned in C++98; "inline" and
     "static" are GNU extensions handled below.  */
  if (storage != NULL_TREE)
    {
      if (!in_system_header_at (input_location))
	{
	  if (storage == ridpointers[(int) RID_EXTERN])
	    {
	      if (cxx_dialect == cxx98)
		pedwarn (input_location, OPT_Wpedantic,
			 "ISO C++ 1998 forbids the use of %<extern%> on "
			 "explicit instantiations");
	    }
	  else
	    pedwarn (input_location, OPT_Wpedantic,
		     "ISO C++ forbids the use of %qE"
		     " on explicit instantiations", storage);
	}

      if (storage == ridpointers[(int) RID_INLINE])
	nomem_p = 1;
      else if (storage == ridpointers[(int) RID_EXTERN])
	extern_p = 1;
      else if (storage == ridpointers[(int) RID_STATIC])
	static_p = 1;
      else
	{
	  error ("storage class %qD applied to template instantiation",
		 storage);
	  extern_p = 0;
	}
    }

  if (CLASSTYPE_TEMPLATE_SPECIALIZATION (t))
    {
      /* DR 259 [temp.spec].

	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the
	 explicit instantiation follows a declaration of the explicit
	 specialization.

	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect.  */
      return;
    }
  else if (CLASSTYPE_EXPLICIT_INSTANTIATION (t))
    {
      /* [temp.spec]

	 No program shall explicitly instantiate any template more
	 than once.

	 If PREVIOUS_INSTANTIATION_EXTERN_P, then the first explicit
	 instantiation was `extern'.  If EXTERN_P then the second is.
	 These cases are OK.  */
      previous_instantiation_extern_p = CLASSTYPE_INTERFACE_ONLY (t);

      if (!previous_instantiation_extern_p && !extern_p
	  && (complain & tf_error))
	permerror (input_location,
		   "duplicate explicit instantiation of %q#T", t);

      /* If we've already instantiated the template, just return now.  */
      if (!CLASSTYPE_INTERFACE_ONLY (t))
	return;
    }

  check_explicit_instantiation_namespace (TYPE_NAME (t));
  mark_class_instantiated (t, extern_p);

  /* GNU "inline" instantiation: mark the class but emit no members.  */
  if (nomem_p)
    return;

  /* In contrast to implicit instantiation, where only the
     declarations, and not the definitions, of members are
     instantiated, we have here:

	 [temp.explicit]

	 The explicit instantiation of a class template specialization
	 implies the instantiation of all of its members not
	 previously explicitly specialized in the translation unit
	 containing the explicit instantiation.

     Of course, we can't instantiate member template classes, since we
     don't have any arguments for them.  Note that the standard is
     unclear on whether the instantiation of the members are
     *explicit* instantiations or not.  However, the most natural
     interpretation is that it should be an explicit
     instantiation.  */
  for (tree fld = TYPE_FIELDS (t); fld; fld = DECL_CHAIN (fld))
    if ((VAR_P (fld)
	 || (TREE_CODE (fld) == FUNCTION_DECL
	     && !static_p
	     && user_provided_p (fld)))
	&& DECL_TEMPLATE_INSTANTIATION (fld))
      {
	mark_decl_instantiated (fld, extern_p);
	if (! extern_p)
	  instantiate_decl (fld, /*defer_ok=*/true,
			    /*expl_inst_class_mem_p=*/true);
      }

  /* Recurse into nested class types bound in T's scope.  */
  if (CLASSTYPE_NESTED_UTDS (t))
    binding_table_foreach (CLASSTYPE_NESTED_UTDS (t),
			   bt_instantiate_type_proc, &storage);
}

/* Given a function DECL, which is a specialization of TMPL, modify
   DECL to be a re-instantiation of TMPL with the same template
   arguments.  TMPL should be the template into which tsubst'ing
   should occur for DECL, not the most general template.
   One reason for doing this is a scenario like this:

     template <class T>
     void f(const T&, int i);

     void g() { f(3, 7); }

     template <class T>
     void f(const T& t, const int i) { }

   Note that when the template is first instantiated, with
   instantiate_template, the resulting DECL will have no name for the
   first parameter, and the wrong type for the second.  So, when we go
   to instantiate the DECL, we regenerate it.  */

static void
regenerate_decl_from_template (tree decl, tree tmpl, tree args)
{
  /* The arguments used to instantiate DECL, from the most general
     template.  */
  tree code_pattern;

  code_pattern = DECL_TEMPLATE_RESULT (tmpl);

  /* Make sure that we can see identifiers, and compute access
     correctly.  */
  push_access_scope (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree decl_parm;
      tree pattern_parm;
      tree specs;
      int args_depth;
      int parms_depth;

      /* Use only the innermost args if DECL carries more levels than
	 TMPL has parameter levels (e.g. member templates).  */
      args_depth = TMPL_ARGS_DEPTH (args);
      parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));
      if (args_depth > parms_depth)
	args = get_innermost_template_args (args, parms_depth);

      /* Re-substitute the exception specification from the pattern and
	 graft it onto DECL's function type.  */
      specs = tsubst_exception_specification (TREE_TYPE (code_pattern),
					      args, tf_error, NULL_TREE,
					      /*defer_ok*/false);
      if (specs && specs != error_mark_node)
	TREE_TYPE (decl) = build_exception_variant (TREE_TYPE (decl),
						    specs);

      /* Merge parameter declarations.  */
      decl_parm = skip_artificial_parms_for (decl,
					     DECL_ARGUMENTS (decl));
      pattern_parm
	= skip_artificial_parms_for (code_pattern,
				     DECL_ARGUMENTS (code_pattern));
      while (decl_parm && !DECL_PACK_P (pattern_parm))
	{
	  tree parm_type;
	  tree attributes;

	  if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
	    DECL_NAME (decl_parm) = DECL_NAME (pattern_parm);
	  parm_type = tsubst (TREE_TYPE (pattern_parm), args, tf_error,
			      NULL_TREE);
	  parm_type = type_decays_to (parm_type);
	  if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
	    TREE_TYPE (decl_parm) = parm_type;
	  attributes = DECL_ATTRIBUTES (pattern_parm);
	  if (DECL_ATTRIBUTES (decl_parm) != attributes)
	    {
	      DECL_ATTRIBUTES (decl_parm) = attributes;
	      cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
	    }
	  decl_parm = DECL_CHAIN (decl_parm);
	  pattern_parm = DECL_CHAIN (pattern_parm);
	}
      /* Merge any parameters that match with the function parameter
	 pack.  */
      if (pattern_parm && DECL_PACK_P (pattern_parm))
	{
	  int i, len;
	  tree expanded_types;
	  /* Expand the TYPE_PACK_EXPANSION that provides the types for
	     the parameters in this function parameter pack.  */
	  expanded_types = tsubst_pack_expansion (TREE_TYPE (pattern_parm),
						  args, tf_error, NULL_TREE);
	  len = TREE_VEC_LENGTH (expanded_types);
	  for (i = 0; i < len; i++)
	    {
	      tree parm_type;
	      tree attributes;

	      if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
		/* Rename the parameter to include the index.  */
		DECL_NAME (decl_parm)
		  = make_ith_pack_parameter_name (DECL_NAME (pattern_parm),
						  i);
	      parm_type = TREE_VEC_ELT (expanded_types, i);
	      parm_type = type_decays_to (parm_type);
	      if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
		TREE_TYPE (decl_parm) = parm_type;
	      attributes = DECL_ATTRIBUTES (pattern_parm);
	      if (DECL_ATTRIBUTES (decl_parm) != attributes)
		{
		  DECL_ATTRIBUTES (decl_parm) = attributes;
		  cplus_decl_attributes (&decl_parm, attributes,
					 /*flags=*/0);
		}
	      decl_parm = DECL_CHAIN (decl_parm);
	    }
	}
      /* Merge additional specifiers from the CODE_PATTERN.  */
      if (DECL_DECLARED_INLINE_P (code_pattern)
	  && !DECL_DECLARED_INLINE_P (decl))
	DECL_DECLARED_INLINE_P (decl) = 1;
    }
  else if (VAR_P (decl))
    {
      /* For a variable, re-substitute the initializer (and, for an
	 array of unknown bound, the type) from the pattern.  */
      start_lambda_scope (decl);
      DECL_INITIAL (decl) =
	tsubst_expr (DECL_INITIAL (code_pattern), args,
		     tf_error, DECL_TI_TEMPLATE (decl),
		     /*integral_constant_expression_p=*/false);
      finish_lambda_scope ();
      if (VAR_HAD_UNKNOWN_BOUND (decl))
	TREE_TYPE (decl) = tsubst (TREE_TYPE (code_pattern), args,
				   tf_error, DECL_TI_TEMPLATE (decl));
    }
  else
    gcc_unreachable ();

  pop_access_scope (decl);
}

/* Return the TEMPLATE_DECL into which DECL_TI_ARGS(DECL) should be
   substituted to get DECL.  */

tree
template_for_substitution (tree decl)
{
  tree tmpl = DECL_TI_TEMPLATE (decl);

  /* Set TMPL to the template whose DECL_TEMPLATE_RESULT is the pattern
     for the instantiation.  This is not always the most general
     template.  Consider, for example:

	template <class T>
	struct S { template <class U> void f();
		   template <> void f<int>(); };

     and an instantiation of S<double>::f<int>.  We want TD to be the
     specialization S<T>::f<int>, not the more general S<T>::f<U>.  */
  while (/* An instantiation cannot have a definition, so we need a
	    more general template.  */
	 DECL_TEMPLATE_INSTANTIATION (tmpl)
	   /* We must also deal with friend templates.  Given:

		template <class T> struct S {
		  template <class U> friend void f() {};
		};

	      S<int>::f<U> say, is not an instantiation of S<T>::f<U>,
	      so far as the language is concerned, but that's still
	      where we get the pattern for the instantiation from.  On
	      other hand, if the definition comes outside the class, say:

		template <class T> struct S {
		  template <class U> friend void f();
		};
		template <class U> friend void f() {}

	      we don't need to look any further.  That's what the check for
	      DECL_INITIAL is for.  */
	  || (TREE_CODE (decl) == FUNCTION_DECL
	      && DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (tmpl)
	      && !DECL_INITIAL (DECL_TEMPLATE_RESULT (tmpl))))
    {
      /* The present template, TD, should not be a definition.  If it
	 were a definition, we should be using it!  Note that we
	 cannot restructure the loop to just keep going until we find
	 a template with a definition, since that might go too far if
	 a specialization was declared, but not defined.  */

      /* Fetch the more general template.  */
      tmpl = DECL_TI_TEMPLATE (tmpl);
    }

  return tmpl;
}

/* Returns true if we need to instantiate this template instance even if we
   know we aren't going to emit it.  */

bool
always_instantiate_p (tree decl)
{
  /* We always instantiate inline functions so that we can inline them.  An
     explicit instantiation declaration prohibits implicit instantiation of
     non-inline functions.  With high levels of optimization, we would
     normally inline non-inline functions -- but we're not allowed to do
     that for "extern template" functions.  Therefore, we check
     DECL_DECLARED_INLINE_P, rather than possibly_inlined_p.  */
  return ((TREE_CODE (decl) == FUNCTION_DECL
	   && (DECL_DECLARED_INLINE_P (decl)
	       || type_uses_auto (TREE_TYPE (TREE_TYPE (decl)))))
	  /* And we need to instantiate static data members so that
	     their initializers are available in integral constant
	     expressions.  */
	  || (VAR_P (decl)
	      && decl_maybe_constant_var_p (decl)));
}

/* If FN has a noexcept-specifier that hasn't been instantiated yet,
   instantiate it now, modifying TREE_TYPE (fn).  Returns false on
   error, true otherwise.  */

bool
maybe_instantiate_noexcept (tree fn, tsubst_flags_t complain)
{
  tree fntype, spec, noex, clone;

  /* Don't instantiate a noexcept-specification from template context.  */
  if (processing_template_decl
      && (!flag_noexcept_type || type_dependent_expression_p (fn)))
    return true;

  /* Work on the abstract function, not a clone such as a
     constructor variant.  */
  if (DECL_CLONED_FUNCTION_P (fn))
    fn = DECL_CLONED_FUNCTION (fn);
  fntype = TREE_TYPE (fn);
  spec = TYPE_RAISES_EXCEPTIONS (fntype);

  if (!spec || !TREE_PURPOSE (spec))
    return true;

  noex = TREE_PURPOSE (spec);

  if (TREE_CODE (noex) == DEFERRED_NOEXCEPT)
    {
      /* FNS tracks functions whose noexcept-specifier is currently
	 being instantiated, to detect self-dependent specifications.  */
      static hash_set<tree>* fns = new hash_set<tree>;
      bool added = false;
      if (DEFERRED_NOEXCEPT_PATTERN (noex) == NULL_TREE)
	spec = get_defaulted_eh_spec (fn, complain);
      else if (!(added = !fns->add (fn)))
	{
	  /* If hash_set::add returns true, the element was already
	     there.  */
	  location_t loc = EXPR_LOC_OR_LOC (DEFERRED_NOEXCEPT_PATTERN (noex),
					    DECL_SOURCE_LOCATION (fn));
	  error_at (loc,
		    "exception specification of %qD depends on itself",
		    fn);
	  spec = noexcept_false_spec;
	}
      else if (push_tinst_level (fn))
	{
	  push_access_scope (fn);
	  push_deferring_access_checks (dk_no_deferred);
	  input_location = DECL_SOURCE_LOCATION (fn);
	  noex = tsubst_copy_and_build (DEFERRED_NOEXCEPT_PATTERN (noex),
					DEFERRED_NOEXCEPT_ARGS (noex),
					tf_warning_or_error, fn,
					/*function_p=*/false,
					/*integral_constant_expression_p=*/true);
	  spec = build_noexcept_spec (noex, tf_warning_or_error);
	  pop_deferring_access_checks ();
	  pop_access_scope (fn);
	  pop_tinst_level ();
	  if (spec == error_mark_node)
	    spec = noexcept_false_spec;
	}
      else
	spec = noexcept_false_spec;

      if (added)
	fns->remove (fn);

      if (spec == error_mark_node)
	return false;

      TREE_TYPE (fn) = build_exception_variant (fntype, spec);
    }

  /* Propagate the instantiated specification to FN's clones.  */
  FOR_EACH_CLONE (clone, fn)
    {
      if (TREE_TYPE (clone) == fntype)
	TREE_TYPE (clone) = TREE_TYPE (fn);
      else
	TREE_TYPE (clone) = build_exception_variant (TREE_TYPE (clone), spec);
    }

  return true;
}

/* We're starting to process the function INST, an instantiation of PATTERN;
   add their parameters to local_specializations.
 */

static void
register_parameter_specializations (tree pattern, tree inst)
{
  tree tmpl_parm = DECL_ARGUMENTS (pattern);
  tree spec_parm = DECL_ARGUMENTS (inst);
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (inst))
    {
      /* Map `this' first, then skip past the implicit parms.  */
      register_local_specialization (spec_parm, tmpl_parm);
      spec_parm = skip_artificial_parms_for (inst, spec_parm);
      tmpl_parm = skip_artificial_parms_for (pattern, tmpl_parm);
    }
  for (; tmpl_parm; tmpl_parm = DECL_CHAIN (tmpl_parm))
    {
      if (!DECL_PACK_P (tmpl_parm))
	{
	  register_local_specialization (spec_parm, tmpl_parm);
	  spec_parm = DECL_CHAIN (spec_parm);
	}
      else
	{
	  /* Register the (value) argument pack as a specialization of
	     TMPL_PARM, then move on.  */
	  tree argpack = extract_fnparm_pack (tmpl_parm, &spec_parm);
	  register_local_specialization (argpack, tmpl_parm);
	}
    }
  gcc_assert (!spec_parm);
}

/* Produce the definition of D, a _DECL generated from a template.  If
   DEFER_OK is true, then we don't have to actually do the
   instantiation now; we just have to do it sometime.  Normally it is
   an error if this is an explicit instantiation but D is undefined.
   EXPL_INST_CLASS_MEM_P is true iff D is a member of an explicitly
   instantiated class template.  */

tree
instantiate_decl (tree d, bool defer_ok, bool expl_inst_class_mem_p)
{
  tree tmpl = DECL_TI_TEMPLATE (d);
  tree gen_args;
  tree args;
  tree td;
  tree code_pattern;
  tree spec;
  tree gen_tmpl;
  bool pattern_defined;
  location_t saved_loc = input_location;
  int saved_unevaluated_operand = cp_unevaluated_operand;
  int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  bool external_p;
  bool deleted_p;

  /* This function should only be used to instantiate templates for
     functions and static member variables.  */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (d));

  /* A concept is never instantiated.  */
  gcc_assert (!DECL_DECLARED_CONCEPT_P (d));

  /* Variables are never deferred; if instantiation is required, they
     are instantiated right away.  That allows for better code in the
     case that an expression refers to the value of the variable --
     if the variable has a constant value the referring expression
     can take advantage of that fact.  */
  if (VAR_P (d))
    defer_ok = false;

  /* Don't instantiate cloned functions.  Instead, instantiate the
     functions they cloned.  */
  if (TREE_CODE (d) == FUNCTION_DECL && DECL_CLONED_FUNCTION_P (d))
    d = DECL_CLONED_FUNCTION (d);

  if (DECL_TEMPLATE_INSTANTIATED (d)
      || (TREE_CODE (d) == FUNCTION_DECL
	  && DECL_DEFAULTED_FN (d) && DECL_INITIAL (d))
      || DECL_TEMPLATE_SPECIALIZATION (d))
    /* D has already been instantiated or explicitly specialized, so
       there's nothing for us to do here.

       It might seem reasonable to check whether or not D is an explicit
       instantiation, and, if so, stop here.  But when an explicit
       instantiation is deferred until the end of the compilation,
       DECL_EXPLICIT_INSTANTIATION is set, even though we still need to
       do the instantiation.  */
    return d;

  /* Check to see whether we know that this template will be
     instantiated in some other file, as with "extern template"
     extension.  */
  external_p = (DECL_INTERFACE_KNOWN (d) && DECL_REALLY_EXTERN (d));

  /* In general, we do not instantiate such templates.  */
  if (external_p && !always_instantiate_p (d))
    return d;

  gen_tmpl = most_general_template (tmpl);
  gen_args = DECL_TI_ARGS (d);

  if (tmpl != gen_tmpl)
    /* We should already have the extra args.  */
    gcc_assert (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl))
		== TMPL_ARGS_DEPTH (gen_args));
  /* And what's in the hash table should match D.  */
  gcc_assert ((spec = retrieve_specialization (gen_tmpl, gen_args, 0)) == d
	      || spec == NULL_TREE);

  /* This needs to happen before any tsubsting.  */
  if (! push_tinst_level (d))
    return d;

  timevar_push (TV_TEMPLATE_INST);

  /* Set TD to the template whose DECL_TEMPLATE_RESULT is the pattern
     for the instantiation.  */
  td = template_for_substitution (d);
  args = gen_args;

  if (VAR_P (d))
    {
      /* Look up an explicit specialization, if any.  */
      tree tid = lookup_template_variable (gen_tmpl, gen_args);
      tree elt = most_specialized_partial_spec (tid, tf_warning_or_error);
      if (elt && elt != error_mark_node)
	{
	  td = TREE_VALUE (elt);
	  args = TREE_PURPOSE (elt);
	}
    }

  code_pattern = DECL_TEMPLATE_RESULT (td);

  /* We should never be trying to instantiate a member of a class
     template or partial specialization.  */
  gcc_assert (d != code_pattern);

  if ((DECL_NAMESPACE_SCOPE_P (d) && !DECL_INITIALIZED_IN_CLASS_P (d))
      || DECL_TEMPLATE_SPECIALIZATION (td))
    /* In the case of a friend template whose definition is provided
       outside the class, we may have too many arguments.  Drop the
       ones we don't need.  The same is true for specializations.  */
    args = get_innermost_template_args
      (args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td)));

  /* Determine whether the pattern actually has a definition we can
     instantiate from, and whether it is a deleted function.  */
  if (TREE_CODE (d) == FUNCTION_DECL)
    {
      deleted_p = DECL_DELETED_FN (code_pattern);
      pattern_defined = ((DECL_SAVED_TREE (code_pattern) != NULL_TREE
			  && DECL_INITIAL (code_pattern) != error_mark_node)
			 || DECL_DEFAULTED_OUTSIDE_CLASS_P (code_pattern)
			 || deleted_p);
    }
  else
    {
      deleted_p = false;
      if (DECL_CLASS_SCOPE_P (code_pattern))
	pattern_defined = (! DECL_IN_AGGR_P (code_pattern)
			   || DECL_INLINE_VAR_P (code_pattern));
      else
	pattern_defined = ! DECL_EXTERNAL (code_pattern);
    }

  /* We may be in the middle of deferred access check.  Disable it now.  */
  push_deferring_access_checks (dk_no_deferred);

  /* Unless an explicit instantiation directive has already determined
     the linkage of D, remember that a definition is available for
     this entity.  */
  if (pattern_defined
      && !DECL_INTERFACE_KNOWN (d)
      && !DECL_NOT_REALLY_EXTERN (d))
    mark_definable (d);

  DECL_SOURCE_LOCATION (td) = DECL_SOURCE_LOCATION (code_pattern);
  DECL_SOURCE_LOCATION (d) = DECL_SOURCE_LOCATION (code_pattern);
  input_location = DECL_SOURCE_LOCATION (d);

  /* If D is a member of an explicitly instantiated class template,
     and no definition is available, treat it like an implicit
     instantiation.  */
  if (!pattern_defined && expl_inst_class_mem_p
      && DECL_EXPLICIT_INSTANTIATION (d))
    {
      /* Leave linkage flags alone on instantiations with anonymous
	 visibility.  */
      if (TREE_PUBLIC (d))
	{
	  DECL_NOT_REALLY_EXTERN (d) = 0;
	  DECL_INTERFACE_KNOWN (d) = 0;
	}
      SET_DECL_IMPLICIT_INSTANTIATION (d);
    }

  /* Defer all other templates, unless we have been explicitly
     forbidden from doing so.  */
  if (/* If there is no definition, we cannot instantiate the
	 template.  */
      ! pattern_defined
      /* If it's OK to postpone instantiation, do so.  */
      || defer_ok
      /* If this is a static data member that will be defined
	 elsewhere, we don't want to instantiate the entire data
	 member, but we do want to instantiate the initializer so that
	 we can substitute that elsewhere.  */
      || (external_p && VAR_P (d))
      /* Handle here a deleted function too, avoid generating
	 its body (c++/61080).  */
      || deleted_p)
    {
      /* The definition of the static data member is now required so
	 we must substitute the initializer.  */
      if (VAR_P (d)
	  && !DECL_INITIAL (d)
	  && DECL_INITIAL (code_pattern))
	{
	  tree ns;
	  tree init;
	  bool const_init = false;
	  bool enter_context = DECL_CLASS_SCOPE_P (d);

	  ns = decl_namespace_context (d);
	  push_nested_namespace (ns);
	  if (enter_context)
	    push_nested_class (DECL_CONTEXT (d));
	  init = tsubst_expr (DECL_INITIAL (code_pattern),
			      args,
			      tf_warning_or_error, NULL_TREE,
			      /*integral_constant_expression_p=*/false);
	  /* If instantiating the initializer involved instantiating this
	     again, don't call cp_finish_decl twice.  */
	  if (!DECL_INITIAL (d))
	    {
	      /* Make sure the initializer is still constant, in case of
		 circular dependency (template/instantiate6.C).  */
	      const_init
		= DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
	      cp_finish_decl (d, init, /*init_const_expr_p=*/const_init,
			      /*asmspec_tree=*/NULL_TREE,
			      LOOKUP_ONLYCONVERTING);
	    }
	  if (enter_context)
	    pop_nested_class ();
	  pop_nested_namespace (ns);
	}

      /* We restore the source position here because it's used by
	 add_pending_template.  */
      input_location = saved_loc;

      if (at_eof && !pattern_defined
	  && DECL_EXPLICIT_INSTANTIATION (d)
	  && DECL_NOT_REALLY_EXTERN (d))
	/* [temp.explicit]

	   The definition of a non-exported function template, a
	   non-exported member function template, or a non-exported
	   member function or static data member of a class template
	   shall be present in every translation unit in which it is
	   explicitly instantiated.  */
	permerror (input_location,  "explicit instantiation of %qD "
		   "but no definition available", d);

      /* If we're in unevaluated context, we just wanted to get the
	 constant value; this isn't an odr use, so don't queue
	 a full instantiation.  */
      if (cp_unevaluated_operand != 0)
	goto out;
      /* ??? Historically, we have instantiated inline functions, even
	 when marked as "extern template".  */
      if (!(external_p && VAR_P (d)))
	add_pending_template (d);
      goto out;
    }
  /* Tell the repository that D is available in this translation unit
     -- and see if it is supposed to be instantiated here.  */
  if (TREE_PUBLIC (d) && !DECL_REALLY_EXTERN (d) && !repo_emit_p (d))
    {
      /* In a PCH file, despite the fact that the repository hasn't
	 requested instantiation in the PCH it is still possible that
	 an instantiation will be required in a file that includes the
	 PCH.  */
      if (pch_file)
	add_pending_template (d);
      /* Instantiate inline functions so that the inliner can do its
	 job, even though we'll not be emitting a copy of this
	 function.  */
      if (!(TREE_CODE (d) == FUNCTION_DECL && possibly_inlined_p (d)))
	goto out;
    }

  bool push_to_top, nested;
  tree fn_context;
  fn_context = decl_function_context (d);
  if (LAMBDA_FUNCTION_P (d))
    /* tsubst_lambda_expr resolved any references to enclosing functions.  */
    fn_context = NULL_TREE;
  nested = current_function_decl != NULL_TREE;
  push_to_top = !(nested && fn_context == current_function_decl);

  vec<tree> omp_privatization_save;
  if (nested)
    save_omp_privatization_clauses (omp_privatization_save);

  if (push_to_top)
    push_to_top_level ();
  else
    {
      push_function_context ();
      cp_unevaluated_operand = 0;
      c_inhibit_evaluation_warnings = 0;
    }

  /* Mark D as instantiated so that recursive calls to
     instantiate_decl do not try to instantiate it again.  */
  DECL_TEMPLATE_INSTANTIATED (d) = 1;

  /* Regenerate the declaration in case the template has been modified
     by a subsequent redeclaration.  */
  regenerate_decl_from_template (d, td, args);

  /* We already set the file and line above.  Reset them now in case
     they changed as a result of calling regenerate_decl_from_template.  */
  input_location = DECL_SOURCE_LOCATION (d);

  if (VAR_P (d))
    {
      tree init;
      bool const_init = false;

      /* Clear out DECL_RTL; whatever was there before may not be right
	 since we've reset the type of the declaration.  */
      SET_DECL_RTL (d, NULL);
      DECL_IN_AGGR_P (d) = 0;

      /* The initializer is placed in DECL_INITIAL by
	 regenerate_decl_from_template so we don't need to
	 push/pop_access_scope again here.  Pull it out so that
	 cp_finish_decl can process it.  */
      init = DECL_INITIAL (d);
      DECL_INITIAL (d) = NULL_TREE;
      DECL_INITIALIZED_P (d) = 0;

      /* Clear DECL_EXTERNAL so that cp_finish_decl will process the
	 initializer.  That function will defer actual emission until
	 we have a chance to determine linkage.  */
      DECL_EXTERNAL (d) = 0;

      /* Enter the scope of D so that access-checking works correctly.  */
      bool enter_context = DECL_CLASS_SCOPE_P (d);
      if (enter_context)
	push_nested_class (DECL_CONTEXT (d));

      const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
      cp_finish_decl (d, init, const_init, NULL_TREE, 0);

      if (enter_context)
	pop_nested_class ();

      if (variable_template_p (gen_tmpl))
	note_variable_template_instantiation (d);
    }
  else if (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (code_pattern))
    synthesize_method (d);
  else if (TREE_CODE (d) == FUNCTION_DECL)
    {
      /* Set up the list of local specializations.  */
      local_specialization_stack lss (push_to_top ? lss_blank : lss_copy);
      tree block = NULL_TREE;

      /* Set up context.  */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)
	  && TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL)
	block = push_stmt_list ();
      else
	start_preparsed_function (d, NULL_TREE, SF_PRE_PARSED);

      /* Some typedefs referenced from within the template code need
	 to be access checked at template instantiation time, i.e now.
	 These types were added to the template at parsing time.
	 Let's get those and perform the access checks then.  */
      perform_typedefs_access_check (DECL_TEMPLATE_RESULT (td),
				     args);

      /* Create substitution entries for the parameters.  */
      register_parameter_specializations (code_pattern, d);

      /* Substitute into the body of the function.  */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
	tsubst_omp_udr (DECL_SAVED_TREE (code_pattern), args,
			tf_warning_or_error, tmpl);
      else
	{
	  tsubst_expr (DECL_SAVED_TREE (code_pattern), args,
		       tf_warning_or_error, tmpl,
		       /*integral_constant_expression_p=*/false);

	  /* Set the current input_location to the end of the function
	     so that finish_function knows where we are.  */
	  input_location
	    = DECL_STRUCT_FUNCTION (code_pattern)->function_end_locus;

	  /* Remember if we saw an infinite loop in the template.  */
	  current_function_infinite_loop
	    = DECL_STRUCT_FUNCTION (code_pattern)->language->infinite_loop;
	}

      /* Finish the function.  */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)
	  && TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL)
	DECL_SAVED_TREE (d) = pop_stmt_list (block);
      else
	{
	  d = finish_function (/*inline_p=*/false);
	  expand_or_defer_fn (d);
	}

      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
	cp_check_omp_declare_reduction (d);
    }

  /* We're not deferring instantiation any more.  */
  TI_PENDING_TEMPLATE_FLAG (DECL_TEMPLATE_INFO (d)) = 0;

  if (push_to_top)
    pop_from_top_level ();
  else
    pop_function_context ();

  if (nested)
    restore_omp_privatization_clauses (omp_privatization_save);

out:
  /* Common exit: unwind the deferred-access, timevar and tinst stacks
     and restore the saved global state.  */
  pop_deferring_access_checks ();
  timevar_pop (TV_TEMPLATE_INST);
  pop_tinst_level ();
  input_location = saved_loc;
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

  return d;
}

/* Run through the list of templates that we wish we could
   instantiate, and instantiate any we can.  RETRIES is the
   number of times we retry pending template instantiation.  */

void
instantiate_pending_templates (int retries)
{
  int reconsider;
  location_t saved_loc = input_location;

  /* Instantiating templates may trigger vtable generation.  This in turn
     may require further template instantiations.  We place a limit here
     to avoid infinite loop.  */
  if (pending_templates && retries >= max_tinst_depth)
    {
      tree decl = pending_templates->tinst->maybe_get_node ();

      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " instantiating %q+D, possibly from virtual table"
		   " generation"
		   " (use -ftemplate-depth= to increase the maximum)",
		   max_tinst_depth, decl);
      if (TREE_CODE (decl) == FUNCTION_DECL)
	/* Pretend that we defined it.  */
	DECL_INITIAL (decl) = error_mark_node;
      return;
    }

  /* Keep sweeping the pending list until a whole pass makes no
     progress.  */
  do
    {
      struct pending_template **t = &pending_templates;
      struct pending_template *last = NULL;
      reconsider = 0;
      while (*t)
	{
	  tree instantiation = reopen_tinst_level ((*t)->tinst);
	  bool complete = false;

	  if (TYPE_P (instantiation))
	    {
	      if (!COMPLETE_TYPE_P (instantiation))
		{
		  instantiate_class_template (instantiation);
		  if (CLASSTYPE_TEMPLATE_INSTANTIATION (instantiation))
		    for (tree fld = TYPE_FIELDS (instantiation);
			 fld; fld = TREE_CHAIN (fld))
		      if ((VAR_P (fld)
			   || (TREE_CODE (fld) == FUNCTION_DECL
			       && !DECL_ARTIFICIAL (fld)))
			  && DECL_TEMPLATE_INSTANTIATION (fld))
			instantiate_decl (fld,
					  /*defer_ok=*/false,
					  /*expl_inst_class_mem_p=*/false);

		  if (COMPLETE_TYPE_P (instantiation))
		    reconsider = 1;
		}

	      complete = COMPLETE_TYPE_P (instantiation);
	    }
	  else
	    {
	      if (!DECL_TEMPLATE_SPECIALIZATION (instantiation)
		  && !DECL_TEMPLATE_INSTANTIATED (instantiation))
		{
		  instantiation
		    = instantiate_decl (instantiation,
					/*defer_ok=*/false,
					/*expl_inst_class_mem_p=*/false);
		  if (DECL_TEMPLATE_INSTANTIATED (instantiation))
		    reconsider = 1;
		}

	      complete = (DECL_TEMPLATE_SPECIALIZATION (instantiation)
			  || DECL_TEMPLATE_INSTANTIATED (instantiation));
	    }

	  if (complete)
	    {
	      /* If INSTANTIATION has been instantiated, then we don't
		 need to consider it again in the future.  */
	      struct pending_template *drop = *t;
	      *t = (*t)->next;
	      set_refcount_ptr (drop->tinst);
	      pending_template_freelist ().free (drop);
	    }
	  else
	    {
	      last = *t;
	      t = &(*t)->next;
	    }
	  tinst_depth = 0;
	  set_refcount_ptr (current_tinst_level);
	}
      last_pending_template = last;
    }
  while (reconsider);

  input_location = saved_loc;
}

/* Substitute ARGVEC into T, which is a list of initializers for
   either base class or a non-static data member.  The TREE_PURPOSEs
   are DECLs, and the TREE_VALUEs are the initializer values.  Used by
   instantiate_decl.
 */

static tree
tsubst_initializer_list (tree t, tree argvec)
{
  tree inits = NULL_TREE;
  tree target_ctor = error_mark_node;

  for (; t; t = TREE_CHAIN (t))
    {
      tree decl;
      tree init;
      tree expanded_bases = NULL_TREE;
      tree expanded_arguments = NULL_TREE;
      int i, len = 1;

      if (TREE_CODE (TREE_PURPOSE (t)) == TYPE_PACK_EXPANSION)
	{
	  tree expr;
	  tree arg;

	  /* Expand the base class expansion type into separate base
	     classes.  */
	  expanded_bases = tsubst_pack_expansion (TREE_PURPOSE (t), argvec,
						  tf_warning_or_error,
						  NULL_TREE);
	  if (expanded_bases == error_mark_node)
	    continue;

	  /* We'll be building separate TREE_LISTs of arguments for
	     each base.  */
	  len = TREE_VEC_LENGTH (expanded_bases);
	  expanded_arguments = make_tree_vec (len);
	  for (i = 0; i < len; i++)
	    TREE_VEC_ELT (expanded_arguments, i) = NULL_TREE;

	  /* Build a dummy EXPR_PACK_EXPANSION that will be used to
	     expand each argument in the TREE_VALUE of t.  */
	  expr = make_node (EXPR_PACK_EXPANSION);
	  PACK_EXPANSION_LOCAL_P (expr) = true;
	  PACK_EXPANSION_PARAMETER_PACKS (expr) =
	    PACK_EXPANSION_PARAMETER_PACKS (TREE_PURPOSE (t));

	  if (TREE_VALUE (t) == void_type_node)
	    /* VOID_TYPE_NODE is used to indicate
	       value-initialization.  */
	    {
	      for (i = 0; i < len; i++)
		TREE_VEC_ELT (expanded_arguments, i) = void_type_node;
	    }
	  else
	    {
	      /* Substitute parameter packs into each argument in the
		 TREE_LIST.  */
	      in_base_initializer = 1;
	      for (arg = TREE_VALUE (t); arg; arg = TREE_CHAIN (arg))
		{
		  tree expanded_exprs;

		  /* Expand the argument.  */
		  SET_PACK_EXPANSION_PATTERN (expr, TREE_VALUE (arg));
		  expanded_exprs
		    = tsubst_pack_expansion (expr, argvec,
					     tf_warning_or_error,
					     NULL_TREE);
		  if (expanded_exprs == error_mark_node)
		    continue;

		  /* Prepend each of the expanded expressions to the
		     corresponding TREE_LIST in EXPANDED_ARGUMENTS.  */
		  for (i = 0; i < len; i++)
		    {
		      TREE_VEC_ELT (expanded_arguments, i) =
			tree_cons (NULL_TREE,
				   TREE_VEC_ELT (expanded_exprs, i),
				   TREE_VEC_ELT (expanded_arguments, i));
		    }
		}
	      in_base_initializer = 0;

	      /* Reverse all of the TREE_LISTs in EXPANDED_ARGUMENTS,
		 since we built them backwards.  */
	      for (i = 0; i < len; i++)
		{
		  TREE_VEC_ELT (expanded_arguments, i) =
		    nreverse (TREE_VEC_ELT (expanded_arguments, i));
		}
	    }
	}

      /* LEN is 1 for the non-pack case; for a pack it walks each
	 expanded base/argument pair.  */
      for (i = 0; i < len; ++i)
	{
	  if (expanded_bases)
	    {
	      decl = TREE_VEC_ELT (expanded_bases, i);
	      decl = expand_member_init (decl);
	      init = TREE_VEC_ELT (expanded_arguments, i);
	    }
	  else
	    {
	      tree tmp;
	      decl = tsubst_copy (TREE_PURPOSE (t), argvec,
				  tf_warning_or_error, NULL_TREE);

	      decl = expand_member_init (decl);
	      if (decl && !DECL_P (decl))
		in_base_initializer = 1;

	      init = TREE_VALUE (t);
	      tmp = init;
	      if (init != void_type_node)
		init = tsubst_expr (init, argvec,
				    tf_warning_or_error, NULL_TREE,
				    /*integral_constant_expression_p=*/false);
	      if (init == NULL_TREE && tmp != NULL_TREE)
		/* If we had an initializer but it instantiated to nothing,
		   value-initialize the object.  This will only occur when
		   the initializer was a pack expansion where the parameter
		   packs used in that expansion were of length zero.  */
		init = void_type_node;
	      in_base_initializer = 0;
	    }

	  if (target_ctor != error_mark_node
	      && init != error_mark_node)
	    {
	      error ("mem-initializer for %qD follows constructor delegation",
		     decl);
	      return inits;
	    }
	  /* Look for a target constructor.  */
	  if (init != error_mark_node
	      && decl && CLASS_TYPE_P (decl)
	      && same_type_p (decl, current_class_type))
	    {
	      maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);
	      if (inits)
		{
		  error ("constructor delegation follows mem-initializer "
			 "for %qD",
			 TREE_PURPOSE (inits));
		  continue;
		}
	      target_ctor = init;
	    }

	  if (decl)
	    {
	      init = build_tree_list (decl, init);
	      /* Carry the source position of the initializer.  */
	      TREE_CHAIN (init) = inits;
	      inits = init;
	    }
	}
    }
  return inits;
}

/* Set CURRENT_ACCESS_SPECIFIER based on the protection of DECL.
*/ static void set_current_access_from_decl (tree decl) { if (TREE_PRIVATE (decl)) current_access_specifier = access_private_node; else if (TREE_PROTECTED (decl)) current_access_specifier = access_protected_node; else current_access_specifier = access_public_node; } /* Instantiate an enumerated type. TAG is the template type, NEWTAG is the instantiation (which should have been created with start_enum) and ARGS are the template arguments to use. */ static void tsubst_enum (tree tag, tree newtag, tree args) { tree e; if (SCOPED_ENUM_P (newtag)) begin_scope (sk_scoped_enum, newtag); for (e = TYPE_VALUES (tag); e; e = TREE_CHAIN (e)) { tree value; tree decl; decl = TREE_VALUE (e); /* Note that in a template enum, the TREE_VALUE is the CONST_DECL, not the corresponding INTEGER_CST. */ value = tsubst_expr (DECL_INITIAL (decl), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/true); /* Give this enumeration constant the correct access. */ set_current_access_from_decl (decl); /* Actually build the enumerator itself. Here we're assuming that enumerators can't have dependent attributes. */ build_enumerator (DECL_NAME (decl), value, newtag, DECL_ATTRIBUTES (decl), DECL_SOURCE_LOCATION (decl)); } if (SCOPED_ENUM_P (newtag)) finish_scope (); finish_enum_value_list (newtag); finish_enum (newtag); DECL_SOURCE_LOCATION (TYPE_NAME (newtag)) = DECL_SOURCE_LOCATION (TYPE_NAME (tag)); } /* DECL is a FUNCTION_DECL that is a template specialization. Return its type -- but without substituting the innermost set of template arguments. So, innermost set of template parameters will appear in the type. */ tree get_mostly_instantiated_function_type (tree decl) { /* For a function, DECL_TI_TEMPLATE is partially instantiated. */ return TREE_TYPE (DECL_TI_TEMPLATE (decl)); } /* Return truthvalue if we're processing a template different from the last one involved in diagnostics. 
*/ bool problematic_instantiation_changed (void) { return current_tinst_level != last_error_tinst_level; } /* Remember current template involved in diagnostics. */ void record_last_problematic_instantiation (void) { set_refcount_ptr (last_error_tinst_level, current_tinst_level); } struct tinst_level * current_instantiation (void) { return current_tinst_level; } /* Return TRUE if current_function_decl is being instantiated, false otherwise. */ bool instantiating_current_function_p (void) { return (current_instantiation () && (current_instantiation ()->maybe_get_node () == current_function_decl)); } /* [temp.param] Check that template non-type parm TYPE is of an allowable type. Return false for ok, true for disallowed. Issue error and inform messages under control of COMPLAIN. */ static bool invalid_nontype_parm_type_p (tree type, tsubst_flags_t complain) { if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)) return false; else if (TYPE_PTR_P (type)) return false; else if (TREE_CODE (type) == REFERENCE_TYPE && !TYPE_REF_IS_RVALUE (type)) return false; else if (TYPE_PTRMEM_P (type)) return false; else if (TREE_CODE (type) == TEMPLATE_TYPE_PARM) return false; else if (TREE_CODE (type) == TYPENAME_TYPE) return false; else if (TREE_CODE (type) == DECLTYPE_TYPE) return false; else if (TREE_CODE (type) == NULLPTR_TYPE) return false; /* A bound template template parm could later be instantiated to have a valid nontype parm type via an alias template. */ else if (cxx_dialect >= cxx11 && TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) return false; if (complain & tf_error) { if (type == error_mark_node) inform (input_location, "invalid template non-type parameter"); else error ("%q#T is not a valid type for a template non-type parameter", type); } return true; } /* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type]. 
Assumes that TYPE really is a type, and not the ERROR_MARK_NODE.*/ static bool dependent_type_p_r (tree type) { tree scope; /* [temp.dep.type] A type is dependent if it is: -- a template parameter. Template template parameters are types for us (since TYPE_P holds true for them) so we handle them here. */ if (TREE_CODE (type) == TEMPLATE_TYPE_PARM || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM) return true; /* -- a qualified-id with a nested-name-specifier which contains a class-name that names a dependent type or whose unqualified-id names a dependent type. */ if (TREE_CODE (type) == TYPENAME_TYPE) return true; /* An alias template specialization can be dependent even if the resulting type is not. */ if (dependent_alias_template_spec_p (type)) return true; /* -- a cv-qualified type where the cv-unqualified type is dependent. No code is necessary for this bullet; the code below handles cv-qualified types, and we don't want to strip aliases with TYPE_MAIN_VARIANT because of DR 1558. */ /* -- a compound type constructed from any dependent type. */ if (TYPE_PTRMEM_P (type)) return (dependent_type_p (TYPE_PTRMEM_CLASS_TYPE (type)) || dependent_type_p (TYPE_PTRMEM_POINTED_TO_TYPE (type))); else if (TYPE_PTR_P (type) || TREE_CODE (type) == REFERENCE_TYPE) return dependent_type_p (TREE_TYPE (type)); else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) { tree arg_type; if (dependent_type_p (TREE_TYPE (type))) return true; for (arg_type = TYPE_ARG_TYPES (type); arg_type; arg_type = TREE_CHAIN (arg_type)) if (dependent_type_p (TREE_VALUE (arg_type))) return true; if (cxx_dialect >= cxx17) /* A value-dependent noexcept-specifier makes the type dependent. */ if (tree spec = TYPE_RAISES_EXCEPTIONS (type)) if (tree noex = TREE_PURPOSE (spec)) /* Treat DEFERRED_NOEXCEPT as non-dependent, since it doesn't affect overload resolution and treating it as dependent breaks things. 
*/ if (TREE_CODE (noex) != DEFERRED_NOEXCEPT && value_dependent_expression_p (noex)) return true; return false; } /* -- an array type constructed from any dependent type or whose size is specified by a constant expression that is value-dependent. We checked for type- and value-dependence of the bounds in compute_array_index_type, so TYPE_DEPENDENT_P is already set. */ if (TREE_CODE (type) == ARRAY_TYPE) { if (TYPE_DOMAIN (type) && dependent_type_p (TYPE_DOMAIN (type))) return true; return dependent_type_p (TREE_TYPE (type)); } /* -- a template-id in which either the template name is a template parameter ... */ if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM) return true; /* ... or any of the template arguments is a dependent type or an expression that is type-dependent or value-dependent. */ else if (CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type))))) return true; /* All TYPEOF_TYPEs, DECLTYPE_TYPEs, and UNDERLYING_TYPEs are dependent; if the argument of the `typeof' expression is not type-dependent, then it should already been have resolved. */ if (TREE_CODE (type) == TYPEOF_TYPE || TREE_CODE (type) == DECLTYPE_TYPE || TREE_CODE (type) == UNDERLYING_TYPE) return true; /* A template argument pack is dependent if any of its packed arguments are. */ if (TREE_CODE (type) == TYPE_ARGUMENT_PACK) { tree args = ARGUMENT_PACK_ARGS (type); int i, len = TREE_VEC_LENGTH (args); for (i = 0; i < len; ++i) if (dependent_template_arg_p (TREE_VEC_ELT (args, i))) return true; } /* All TYPE_PACK_EXPANSIONs are dependent, because parameter packs must be template parameters. */ if (TREE_CODE (type) == TYPE_PACK_EXPANSION) return true; if (any_dependent_type_attributes_p (TYPE_ATTRIBUTES (type))) return true; /* The standard does not specifically mention types that are local to template functions or local classes, but they should be considered dependent too. 
For example: template <int I> void f() { enum E { a = I }; S<sizeof (E)> s; } The size of `E' cannot be known until the value of `I' has been determined. Therefore, `E' must be considered dependent. */ scope = TYPE_CONTEXT (type); if (scope && TYPE_P (scope)) return dependent_type_p (scope); /* Don't use type_dependent_expression_p here, as it can lead to infinite recursion trying to determine whether a lambda nested in a lambda is dependent (c++/47687). */ else if (scope && TREE_CODE (scope) == FUNCTION_DECL && DECL_LANG_SPECIFIC (scope) && DECL_TEMPLATE_INFO (scope) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (scope))))) return true; /* Other types are non-dependent. */ return false; } /* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type]. Note that a NULL type is considered dependent. */ bool dependent_type_p (tree type) { /* If there are no template parameters in scope, then there can't be any dependent types. */ if (!processing_template_decl) { /* If we are not processing a template, then nobody should be providing us with a dependent type. */ gcc_assert (type); gcc_assert (TREE_CODE (type) != TEMPLATE_TYPE_PARM || is_auto (type)); return false; } /* If the type is NULL, we have not computed a type for the entity in question; in that case, the type is dependent. */ if (!type) return true; /* Erroneous types can be considered non-dependent. */ if (type == error_mark_node) return false; /* Getting here with global_type_node means we improperly called this function on the TREE_TYPE of an IDENTIFIER_NODE. */ gcc_checking_assert (type != global_type_node); /* If we have not already computed the appropriate value for TYPE, do so now. */ if (!TYPE_DEPENDENT_P_VALID (type)) { TYPE_DEPENDENT_P (type) = dependent_type_p_r (type); TYPE_DEPENDENT_P_VALID (type) = 1; } return TYPE_DEPENDENT_P (type); } /* Returns TRUE if SCOPE is a dependent scope, in which we can't do any lookup. 
In other words, a dependent type that is not the current instantiation. */ bool dependent_scope_p (tree scope) { return (scope && TYPE_P (scope) && dependent_type_p (scope) && !currently_open_class (scope)); } /* T is a SCOPE_REF. Return whether it represents a non-static member of an unknown base of 'this' (and is therefore instantiation-dependent). */ static bool unknown_base_ref_p (tree t) { if (!current_class_ptr) return false; tree mem = TREE_OPERAND (t, 1); if (shared_member_p (mem)) return false; tree cur = current_nonlambda_class_type (); if (!any_dependent_bases_p (cur)) return false; tree ctx = TREE_OPERAND (t, 0); if (DERIVED_FROM_P (ctx, cur)) return false; return true; } /* T is a SCOPE_REF; return whether we need to consider it instantiation-dependent so that we can check access at instantiation time even though we know which member it resolves to. */ static bool instantiation_dependent_scope_ref_p (tree t) { if (DECL_P (TREE_OPERAND (t, 1)) && CLASS_TYPE_P (TREE_OPERAND (t, 0)) && !unknown_base_ref_p (t) && accessible_in_template_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1))) return false; else return true; } /* Returns TRUE if the EXPRESSION is value-dependent, in the sense of [temp.dep.constexpr]. EXPRESSION is already known to be a constant expression. */ /* Note that this predicate is not appropriate for general expressions; only constant expressions (that satisfy potential_constant_expression) can be tested for value dependence. */ bool value_dependent_expression_p (tree expression) { if (!processing_template_decl || expression == NULL_TREE) return false; /* A type-dependent expression is also value-dependent. */ if (type_dependent_expression_p (expression)) return true; switch (TREE_CODE (expression)) { case BASELINK: /* A dependent member function of the current instantiation. */ return dependent_type_p (BINFO_TYPE (BASELINK_BINFO (expression))); case FUNCTION_DECL: /* A dependent member function of the current instantiation. 
*/ if (DECL_CLASS_SCOPE_P (expression) && dependent_type_p (DECL_CONTEXT (expression))) return true; break; case IDENTIFIER_NODE: /* A name that has not been looked up -- must be dependent. */ return true; case TEMPLATE_PARM_INDEX: /* A non-type template parm. */ return true; case CONST_DECL: /* A non-type template parm. */ if (DECL_TEMPLATE_PARM_P (expression)) return true; return value_dependent_expression_p (DECL_INITIAL (expression)); case VAR_DECL: /* A constant with literal type and is initialized with an expression that is value-dependent. */ if (DECL_DEPENDENT_INIT_P (expression) /* FIXME cp_finish_decl doesn't fold reference initializers. */ || TREE_CODE (TREE_TYPE (expression)) == REFERENCE_TYPE) return true; if (DECL_HAS_VALUE_EXPR_P (expression)) { tree value_expr = DECL_VALUE_EXPR (expression); if (value_dependent_expression_p (value_expr)) return true; } return false; case DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: case CONST_CAST_EXPR: case REINTERPRET_CAST_EXPR: case CAST_EXPR: case IMPLICIT_CONV_EXPR: /* These expressions are value-dependent if the type to which the cast occurs is dependent or the expression being casted is value-dependent. */ { tree type = TREE_TYPE (expression); if (dependent_type_p (type)) return true; /* A functional cast has a list of operands. */ expression = TREE_OPERAND (expression, 0); if (!expression) { /* If there are no operands, it must be an expression such as "int()". This should not happen for aggregate types because it would form non-constant expressions. 
*/ gcc_assert (cxx_dialect >= cxx11 || INTEGRAL_OR_ENUMERATION_TYPE_P (type)); return false; } if (TREE_CODE (expression) == TREE_LIST) return any_value_dependent_elements_p (expression); return value_dependent_expression_p (expression); } case SIZEOF_EXPR: if (SIZEOF_EXPR_TYPE_P (expression)) return dependent_type_p (TREE_TYPE (TREE_OPERAND (expression, 0))); /* FALLTHRU */ case ALIGNOF_EXPR: case TYPEID_EXPR: /* A `sizeof' expression is value-dependent if the operand is type-dependent or is a pack expansion. */ expression = TREE_OPERAND (expression, 0); if (PACK_EXPANSION_P (expression)) return true; else if (TYPE_P (expression)) return dependent_type_p (expression); return instantiation_dependent_uneval_expression_p (expression); case AT_ENCODE_EXPR: /* An 'encode' expression is value-dependent if the operand is type-dependent. */ expression = TREE_OPERAND (expression, 0); return dependent_type_p (expression); case NOEXCEPT_EXPR: expression = TREE_OPERAND (expression, 0); return instantiation_dependent_uneval_expression_p (expression); case SCOPE_REF: /* All instantiation-dependent expressions should also be considered value-dependent. */ return instantiation_dependent_scope_ref_p (expression); case COMPONENT_REF: return (value_dependent_expression_p (TREE_OPERAND (expression, 0)) || value_dependent_expression_p (TREE_OPERAND (expression, 1))); case NONTYPE_ARGUMENT_PACK: /* A NONTYPE_ARGUMENT_PACK is value-dependent if any packed argument is value-dependent. 
*/ { tree values = ARGUMENT_PACK_ARGS (expression); int i, len = TREE_VEC_LENGTH (values); for (i = 0; i < len; ++i) if (value_dependent_expression_p (TREE_VEC_ELT (values, i))) return true; return false; } case TRAIT_EXPR: { tree type2 = TRAIT_EXPR_TYPE2 (expression); if (dependent_type_p (TRAIT_EXPR_TYPE1 (expression))) return true; if (!type2) return false; if (TREE_CODE (type2) != TREE_LIST) return dependent_type_p (type2); for (; type2; type2 = TREE_CHAIN (type2)) if (dependent_type_p (TREE_VALUE (type2))) return true; return false; } case MODOP_EXPR: return ((value_dependent_expression_p (TREE_OPERAND (expression, 0))) || (value_dependent_expression_p (TREE_OPERAND (expression, 2)))); case ARRAY_REF: return ((value_dependent_expression_p (TREE_OPERAND (expression, 0))) || (value_dependent_expression_p (TREE_OPERAND (expression, 1)))); case ADDR_EXPR: { tree op = TREE_OPERAND (expression, 0); return (value_dependent_expression_p (op) || has_value_dependent_address (op)); } case REQUIRES_EXPR: /* Treat all requires-expressions as value-dependent so we don't try to fold them. */ return true; case TYPE_REQ: return dependent_type_p (TREE_OPERAND (expression, 0)); case CALL_EXPR: { if (value_dependent_expression_p (CALL_EXPR_FN (expression))) return true; tree fn = get_callee_fndecl (expression); int i, nargs; nargs = call_expr_nargs (expression); for (i = 0; i < nargs; ++i) { tree op = CALL_EXPR_ARG (expression, i); /* In a call to a constexpr member function, look through the implicit ADDR_EXPR on the object argument so that it doesn't cause the call to be considered value-dependent. We also look through it in potential_constant_expression. 
*/ if (i == 0 && fn && DECL_DECLARED_CONSTEXPR_P (fn) && DECL_NONSTATIC_MEMBER_FUNCTION_P (fn) && TREE_CODE (op) == ADDR_EXPR) op = TREE_OPERAND (op, 0); if (value_dependent_expression_p (op)) return true; } return false; } case TEMPLATE_ID_EXPR: return variable_concept_p (TREE_OPERAND (expression, 0)); case CONSTRUCTOR: { unsigned ix; tree val; if (dependent_type_p (TREE_TYPE (expression))) return true; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), ix, val) if (value_dependent_expression_p (val)) return true; return false; } case STMT_EXPR: /* Treat a GNU statement expression as dependent to avoid crashing under instantiate_non_dependent_expr; it can't be constant. */ return true; default: /* A constant expression is value-dependent if any subexpression is value-dependent. */ switch (TREE_CODE_CLASS (TREE_CODE (expression))) { case tcc_reference: case tcc_unary: case tcc_comparison: case tcc_binary: case tcc_expression: case tcc_vl_exp: { int i, len = cp_tree_operand_length (expression); for (i = 0; i < len; i++) { tree t = TREE_OPERAND (expression, i); /* In some cases, some of the operands may be missing. (For example, in the case of PREDECREMENT_EXPR, the amount to increment by may be missing.) That doesn't make the expression dependent. */ if (t && value_dependent_expression_p (t)) return true; } } break; default: break; } break; } /* The expression is not value-dependent. */ return false; } /* Returns TRUE if the EXPRESSION is type-dependent, in the sense of [temp.dep.expr]. Note that an expression with no type is considered dependent. Other parts of the compiler arrange for an expression with type-dependent subexpressions to have no type, so this function doesn't have to be fully recursive. 
*/ bool type_dependent_expression_p (tree expression) { if (!processing_template_decl) return false; if (expression == NULL_TREE || expression == error_mark_node) return false; STRIP_ANY_LOCATION_WRAPPER (expression); /* An unresolved name is always dependent. */ if (identifier_p (expression) || TREE_CODE (expression) == USING_DECL || TREE_CODE (expression) == WILDCARD_DECL) return true; /* A fold expression is type-dependent. */ if (TREE_CODE (expression) == UNARY_LEFT_FOLD_EXPR || TREE_CODE (expression) == UNARY_RIGHT_FOLD_EXPR || TREE_CODE (expression) == BINARY_LEFT_FOLD_EXPR || TREE_CODE (expression) == BINARY_RIGHT_FOLD_EXPR) return true; /* Some expression forms are never type-dependent. */ if (TREE_CODE (expression) == PSEUDO_DTOR_EXPR || TREE_CODE (expression) == SIZEOF_EXPR || TREE_CODE (expression) == ALIGNOF_EXPR || TREE_CODE (expression) == AT_ENCODE_EXPR || TREE_CODE (expression) == NOEXCEPT_EXPR || TREE_CODE (expression) == TRAIT_EXPR || TREE_CODE (expression) == TYPEID_EXPR || TREE_CODE (expression) == DELETE_EXPR || TREE_CODE (expression) == VEC_DELETE_EXPR || TREE_CODE (expression) == THROW_EXPR || TREE_CODE (expression) == REQUIRES_EXPR) return false; /* The types of these expressions depends only on the type to which the cast occurs. */ if (TREE_CODE (expression) == DYNAMIC_CAST_EXPR || TREE_CODE (expression) == STATIC_CAST_EXPR || TREE_CODE (expression) == CONST_CAST_EXPR || TREE_CODE (expression) == REINTERPRET_CAST_EXPR || TREE_CODE (expression) == IMPLICIT_CONV_EXPR || TREE_CODE (expression) == CAST_EXPR) return dependent_type_p (TREE_TYPE (expression)); /* The types of these expressions depends only on the type created by the expression. */ if (TREE_CODE (expression) == NEW_EXPR || TREE_CODE (expression) == VEC_NEW_EXPR) { /* For NEW_EXPR tree nodes created inside a template, either the object type itself or a TREE_LIST may appear as the operand 1. 
*/ tree type = TREE_OPERAND (expression, 1); if (TREE_CODE (type) == TREE_LIST) /* This is an array type. We need to check array dimensions as well. */ return dependent_type_p (TREE_VALUE (TREE_PURPOSE (type))) || value_dependent_expression_p (TREE_OPERAND (TREE_VALUE (type), 1)); else return dependent_type_p (type); } if (TREE_CODE (expression) == SCOPE_REF) { tree scope = TREE_OPERAND (expression, 0); tree name = TREE_OPERAND (expression, 1); /* 14.6.2.2 [temp.dep.expr]: An id-expression is type-dependent if it contains an identifier associated by name lookup with one or more declarations declared with a dependent type, or...a nested-name-specifier or qualified-id that names a member of an unknown specialization. */ return (type_dependent_expression_p (name) || dependent_scope_p (scope)); } if (TREE_CODE (expression) == TEMPLATE_DECL && !DECL_TEMPLATE_TEMPLATE_PARM_P (expression)) return uses_outer_template_parms (expression); if (TREE_CODE (expression) == STMT_EXPR) expression = stmt_expr_value_expr (expression); if (BRACE_ENCLOSED_INITIALIZER_P (expression)) { tree elt; unsigned i; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), i, elt) { if (type_dependent_expression_p (elt)) return true; } return false; } /* A static data member of the current instantiation with incomplete array type is type-dependent, as the definition and specializations can have different bounds. */ if (VAR_P (expression) && DECL_CLASS_SCOPE_P (expression) && dependent_type_p (DECL_CONTEXT (expression)) && VAR_HAD_UNKNOWN_BOUND (expression)) return true; /* An array of unknown bound depending on a variadic parameter, eg: template<typename... Args> void foo (Args... args) { int arr[] = { args... }; } template<int... vals> void bar () { int arr[] = { vals... }; } If the array has no length and has an initializer, it must be that we couldn't determine its length in cp_complete_array_type because it is dependent. 
*/ if (VAR_P (expression) && TREE_TYPE (expression) != NULL_TREE && TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE && !TYPE_DOMAIN (TREE_TYPE (expression)) && DECL_INITIAL (expression)) return true; /* A function or variable template-id is type-dependent if it has any dependent template arguments. */ if (VAR_OR_FUNCTION_DECL_P (expression) && DECL_LANG_SPECIFIC (expression) && DECL_TEMPLATE_INFO (expression)) { /* Consider the innermost template arguments, since those are the ones that come from the template-id; the template arguments for the enclosing class do not make it type-dependent unless they are used in the type of the decl. */ if (PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (expression)) && (any_dependent_template_arguments_p (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (expression))))) return true; } /* Otherwise, if the function decl isn't from a dependent scope, it can't be type-dependent. Checking this is important for functions with auto return type, which looks like a dependent type. */ if (TREE_CODE (expression) == FUNCTION_DECL && !(DECL_CLASS_SCOPE_P (expression) && dependent_type_p (DECL_CONTEXT (expression))) && !(DECL_LANG_SPECIFIC (expression) && DECL_FRIEND_P (expression) && (!DECL_FRIEND_CONTEXT (expression) || dependent_type_p (DECL_FRIEND_CONTEXT (expression)))) && !DECL_LOCAL_FUNCTION_P (expression)) { gcc_assert (!dependent_type_p (TREE_TYPE (expression)) || undeduced_auto_decl (expression)); return false; } /* Always dependent, on the number of arguments if nothing else. 
*/ if (TREE_CODE (expression) == EXPR_PACK_EXPANSION) return true; if (TREE_TYPE (expression) == unknown_type_node) { if (TREE_CODE (expression) == ADDR_EXPR) return type_dependent_expression_p (TREE_OPERAND (expression, 0)); if (TREE_CODE (expression) == COMPONENT_REF || TREE_CODE (expression) == OFFSET_REF) { if (type_dependent_expression_p (TREE_OPERAND (expression, 0))) return true; expression = TREE_OPERAND (expression, 1); if (identifier_p (expression)) return false; } /* SCOPE_REF with non-null TREE_TYPE is always non-dependent. */ if (TREE_CODE (expression) == SCOPE_REF) return false; if (BASELINK_P (expression)) { if (BASELINK_OPTYPE (expression) && dependent_type_p (BASELINK_OPTYPE (expression))) return true; expression = BASELINK_FUNCTIONS (expression); } if (TREE_CODE (expression) == TEMPLATE_ID_EXPR) { if (any_dependent_template_arguments_p (TREE_OPERAND (expression, 1))) return true; expression = TREE_OPERAND (expression, 0); if (identifier_p (expression)) return true; } gcc_assert (TREE_CODE (expression) == OVERLOAD || TREE_CODE (expression) == FUNCTION_DECL); for (lkp_iterator iter (expression); iter; ++iter) if (type_dependent_expression_p (*iter)) return true; return false; } gcc_assert (TREE_CODE (expression) != TYPE_DECL); /* Dependent type attributes might not have made it from the decl to the type yet. */ if (DECL_P (expression) && any_dependent_type_attributes_p (DECL_ATTRIBUTES (expression))) return true; return (dependent_type_p (TREE_TYPE (expression))); } /* [temp.dep.expr]/5: A class member access expression (5.2.5) is type-dependent if the expression refers to a member of the current instantiation and the type of the referenced member is dependent, or the class member access expression refers to a member of an unknown specialization. This function returns true if the OBJECT in such a class member access expression is of an unknown specialization. 
*/ bool type_dependent_object_expression_p (tree object) { /* An IDENTIFIER_NODE can sometimes have a TREE_TYPE, but it's still dependent. */ if (TREE_CODE (object) == IDENTIFIER_NODE) return true; tree scope = TREE_TYPE (object); return (!scope || dependent_scope_p (scope)); } /* walk_tree callback function for instantiation_dependent_expression_p, below. Returns non-zero if a dependent subexpression is found. */ static tree instantiation_dependent_r (tree *tp, int *walk_subtrees, void * /*data*/) { if (TYPE_P (*tp)) { /* We don't have to worry about decltype currently because decltype of an instantiation-dependent expr is a dependent type. This might change depending on the resolution of DR 1172. */ *walk_subtrees = false; return NULL_TREE; } enum tree_code code = TREE_CODE (*tp); switch (code) { /* Don't treat an argument list as dependent just because it has no TREE_TYPE. */ case TREE_LIST: case TREE_VEC: return NULL_TREE; case TEMPLATE_PARM_INDEX: return *tp; /* Handle expressions with type operands. */ case SIZEOF_EXPR: case ALIGNOF_EXPR: case TYPEID_EXPR: case AT_ENCODE_EXPR: { tree op = TREE_OPERAND (*tp, 0); if (code == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (*tp)) op = TREE_TYPE (op); if (TYPE_P (op)) { if (dependent_type_p (op)) return *tp; else { *walk_subtrees = false; return NULL_TREE; } } break; } case COMPONENT_REF: if (identifier_p (TREE_OPERAND (*tp, 1))) /* In a template, finish_class_member_access_expr creates a COMPONENT_REF with an IDENTIFIER_NODE for op1 even if it isn't type-dependent, so that we can check access control at instantiation time (PR 42277). See also Core issue 1273. */ return *tp; break; case SCOPE_REF: if (instantiation_dependent_scope_ref_p (*tp)) return *tp; else break; /* Treat statement-expressions as dependent. */ case BIND_EXPR: return *tp; /* Treat requires-expressions as dependent. */ case REQUIRES_EXPR: return *tp; case CALL_EXPR: /* Treat calls to function concepts as dependent. 
*/ if (function_concept_check_p (*tp)) return *tp; break; case TEMPLATE_ID_EXPR: /* And variable concepts. */ if (variable_concept_p (TREE_OPERAND (*tp, 0))) return *tp; break; default: break; } if (type_dependent_expression_p (*tp)) return *tp; else return NULL_TREE; } /* Returns TRUE if the EXPRESSION is instantiation-dependent, in the sense defined by the ABI: "An expression is instantiation-dependent if it is type-dependent or value-dependent, or it has a subexpression that is type-dependent or value-dependent." Except don't actually check value-dependence for unevaluated expressions, because in sizeof(i) we don't care about the value of i. Checking type-dependence will in turn check value-dependence of array bounds/template arguments as needed. */ bool instantiation_dependent_uneval_expression_p (tree expression) { tree result; if (!processing_template_decl) return false; if (expression == error_mark_node) return false; result = cp_walk_tree_without_duplicates (&expression, instantiation_dependent_r, NULL); return result != NULL_TREE; } /* As above, but also check value-dependence of the expression as a whole. */ bool instantiation_dependent_expression_p (tree expression) { return (instantiation_dependent_uneval_expression_p (expression) || value_dependent_expression_p (expression)); } /* Like type_dependent_expression_p, but it also works while not processing a template definition, i.e. during substitution or mangling. */ bool type_dependent_expression_p_push (tree expr) { bool b; ++processing_template_decl; b = type_dependent_expression_p (expr); --processing_template_decl; return b; } /* Returns TRUE if ARGS contains a type-dependent expression. 
*/

bool
any_type_dependent_arguments_p (const vec<tree, va_gc> *args)
{
  unsigned int i;
  tree arg;

  FOR_EACH_VEC_SAFE_ELT (args, i, arg)
    {
      if (type_dependent_expression_p (arg))
        return true;
    }
  return false;
}

/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
   expressions) contains any type-dependent expressions.  */

bool
any_type_dependent_elements_p (const_tree list)
{
  for (; list; list = TREE_CHAIN (list))
    if (type_dependent_expression_p (TREE_VALUE (list)))
      return true;

  return false;
}

/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
   expressions) contains any value-dependent expressions.  */

bool
any_value_dependent_elements_p (const_tree list)
{
  for (; list; list = TREE_CHAIN (list))
    if (value_dependent_expression_p (TREE_VALUE (list)))
      return true;

  return false;
}

/* Returns TRUE if the ARG (a template argument) is dependent.  */

bool
dependent_template_arg_p (tree arg)
{
  /* Outside of a template, nothing can be dependent.  */
  if (!processing_template_decl)
    return false;

  /* Assume a template argument that was wrongly written by the
     user is dependent. This is consistent with what
     any_dependent_template_arguments_p [that calls this function]
     does.  */
  if (!arg || arg == error_mark_node)
    return true;

  if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
    arg = argument_pack_select_arg (arg);

  if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
    return true;
  if (TREE_CODE (arg) == TEMPLATE_DECL)
    {
      if (DECL_TEMPLATE_PARM_P (arg))
        return true;
      /* A member template of a dependent class is not necessarily
         type-dependent, but it is a dependent template argument because it
         will be a member of an unknown specialization to that template.  */
      tree scope = CP_DECL_CONTEXT (arg);
      return TYPE_P (scope) && dependent_type_p (scope);
    }
  else if (ARGUMENT_PACK_P (arg))
    {
      /* A pack is dependent iff any of its elements is.  */
      tree args = ARGUMENT_PACK_ARGS (arg);
      int i, len = TREE_VEC_LENGTH (args);
      for (i = 0; i < len; ++i)
        {
          if (dependent_template_arg_p (TREE_VEC_ELT (args, i)))
            return true;
        }
      return false;
    }
  else if (TYPE_P (arg))
    return dependent_type_p (arg);
  else
    /* A non-type argument is dependent if either its type or its
       value depends on a template parameter.  */
    return (type_dependent_expression_p (arg)
            || value_dependent_expression_p (arg));
}

/* Returns true if ARGS (a collection of template arguments) contains
   any types that require structural equality testing.  */

bool
any_template_arguments_need_structural_equality_p (tree args)
{
  int i;
  int j;

  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  /* Walk every level of the (possibly multi-level) argument vector.  */
  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
        {
          tree arg = TREE_VEC_ELT (level, j);
          tree packed_args = NULL_TREE;
          int k, len = 1;

          if (ARGUMENT_PACK_P (arg))
            {
              /* Look inside the argument pack.  */
              packed_args = ARGUMENT_PACK_ARGS (arg);
              len = TREE_VEC_LENGTH (packed_args);
            }

          for (k = 0; k < len; ++k)
            {
              if (packed_args)
                arg = TREE_VEC_ELT (packed_args, k);

              if (error_operand_p (arg))
                return true;
              else if (TREE_CODE (arg) == TEMPLATE_DECL)
                continue;
              else if (TYPE_P (arg) && TYPE_STRUCTURAL_EQUALITY_P (arg))
                return true;
              else if (!TYPE_P (arg) && TREE_TYPE (arg)
                       && TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (arg)))
                return true;
            }
        }
    }

  return false;
}

/* Returns true if ARGS (a collection of template arguments) contains
   any dependent arguments.  */

bool
any_dependent_template_arguments_p (const_tree args)
{
  int i;
  int j;

  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
        if (dependent_template_arg_p (TREE_VEC_ELT (level, j)))
          return true;
    }

  return false;
}

/* Returns true if ARGS contains any errors.  */

bool
any_erroneous_template_args_p (const_tree args)
{
  int i;
  int j;

  if (args == error_mark_node)
    return true;

  /* ARGS may be a decl or type carrying template info rather than a
     bare TREE_VEC; unwrap to the actual argument vector.  */
  if (args && TREE_CODE (args) != TREE_VEC)
    {
      if (tree ti = get_template_info (args))
        args = TI_ARGS (ti);
      else
        args = NULL_TREE;
    }

  if (!args)
    return false;

  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
        if (error_operand_p (TREE_VEC_ELT (level, j)))
          return true;
    }

  return false;
}

/* Returns TRUE if the template TMPL is type-dependent.  */

bool
dependent_template_p (tree tmpl)
{
  if (TREE_CODE (tmpl) == OVERLOAD)
    {
      /* An overload set is dependent if any member is.  */
      for (lkp_iterator iter (tmpl); iter; ++iter)
        if (dependent_template_p (*iter))
          return true;
      return false;
    }

  /* Template template parameters are dependent.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)
      || TREE_CODE (tmpl) == TEMPLATE_TEMPLATE_PARM)
    return true;
  /* So are names that have not been looked up.  */
  if (TREE_CODE (tmpl) == SCOPE_REF || identifier_p (tmpl))
    return true;
  return false;
}

/* Returns TRUE if the specialization TMPL<ARGS> is dependent.  */

bool
dependent_template_id_p (tree tmpl, tree args)
{
  return (dependent_template_p (tmpl)
          || any_dependent_template_arguments_p (args));
}

/* Returns TRUE if OMP_FOR with DECLV, INITV, CONDV and INCRV vectors
   are dependent.
   */

bool
dependent_omp_for_p (tree declv, tree initv, tree condv, tree incrv)
{
  int i;

  if (!processing_template_decl)
    return false;

  /* The four vectors run in parallel: one entry per collapsed loop.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      if (type_dependent_expression_p (decl)
          || TREE_CODE (decl) == SCOPE_REF)
        return true;

      if (init && type_dependent_expression_p (init))
        return true;

      if (type_dependent_expression_p (cond))
        return true;

      /* A comparison is dependent if either operand is.  */
      if (COMPARISON_CLASS_P (cond)
          && (type_dependent_expression_p (TREE_OPERAND (cond, 0))
              || type_dependent_expression_p (TREE_OPERAND (cond, 1))))
        return true;

      if (TREE_CODE (incr) == MODOP_EXPR)
        {
          if (type_dependent_expression_p (TREE_OPERAND (incr, 0))
              || type_dependent_expression_p (TREE_OPERAND (incr, 2)))
            return true;
        }
      else if (type_dependent_expression_p (incr))
        return true;
      else if (TREE_CODE (incr) == MODIFY_EXPR)
        {
          if (type_dependent_expression_p (TREE_OPERAND (incr, 0)))
            return true;
          else if (BINARY_CLASS_P (TREE_OPERAND (incr, 1)))
            {
              tree t = TREE_OPERAND (incr, 1);
              if (type_dependent_expression_p (TREE_OPERAND (t, 0))
                  || type_dependent_expression_p (TREE_OPERAND (t, 1)))
                return true;
            }
        }
    }

  return false;
}

/* TYPE is a TYPENAME_TYPE.  Returns the ordinary TYPE to which the
   TYPENAME_TYPE corresponds.  Returns the original TYPENAME_TYPE if
   no such TYPE can be found.  Note that this function peers inside
   uninstantiated templates and therefore should be used only in
   extremely limited situations.  ONLY_CURRENT_P restricts this
   peering to the currently open classes hierarchy (which is required
   when comparing types).  */

tree
resolve_typename_type (tree type, bool only_current_p)
{
  tree scope;
  tree name;
  tree decl;
  int quals;
  tree pushed_scope;
  tree result;

  gcc_assert (TREE_CODE (type) == TYPENAME_TYPE);

  scope = TYPE_CONTEXT (type);
  /* We shouldn't have built a TYPENAME_TYPE with a non-dependent scope.  */
  gcc_checking_assert (uses_template_parms (scope));

  /* Usually the non-qualified identifier of a TYPENAME_TYPE is
     TYPE_IDENTIFIER (type). But when 'type' is a typedef variant of
     a TYPENAME_TYPE node, then TYPE_NAME (type) is set to the TYPE_DECL
     representing the typedef. In that case TYPE_IDENTIFIER (type) is
     not the non-qualified identifier of the TYPENAME_TYPE anymore.
     So by getting the TYPE_IDENTIFIER of the _main declaration_ of
     the TYPENAME_TYPE instead, we avoid messing up with a possible
     typedef variant case.  */
  name = TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (type));

  /* If the SCOPE is itself a TYPENAME_TYPE, then we need to resolve
     it first before we can figure out what NAME refers to.  */
  if (TREE_CODE (scope) == TYPENAME_TYPE)
    {
      if (TYPENAME_IS_RESOLVING_P (scope))
        /* Given a class template A with a dependent base with nested type C,
           typedef typename A::C::C C will land us here, as trying to resolve
           the initial A::C leads to the local C typedef, which leads back to
           A::C::C.  So we break the recursion now.  */
        return type;
      else
        scope = resolve_typename_type (scope, only_current_p);
    }
  /* If we don't know what SCOPE refers to, then we cannot resolve the
     TYPENAME_TYPE.  */
  if (!CLASS_TYPE_P (scope))
    return type;
  /* If this is a typedef, we don't want to look inside (c++/11987).  */
  if (typedef_variant_p (type))
    return type;
  /* If SCOPE isn't the template itself, it will not have a valid
     TYPE_FIELDS list.  */
  if (same_type_p (scope, CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope)))
    /* scope is either the template itself or a compatible instantiation
       like X<T>, so look up the name in the original template.  */
    scope = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope);
  /* If scope has no fields, it can't be a current instantiation.  Check this
     before currently_open_class to avoid infinite recursion (71515).  */
  if (!TYPE_FIELDS (scope))
    return type;
  /* If the SCOPE is not the current instantiation, there's no reason
     to look inside it.  */
  if (only_current_p && !currently_open_class (scope))
    return type;
  /* Enter the SCOPE so that name lookup will be resolved as if we
     were in the class definition.  In particular, SCOPE will no
     longer be considered a dependent type.  */
  pushed_scope = push_scope (scope);
  /* Look up the declaration.  */
  decl = lookup_member (scope, name, /*protect=*/0, /*want_type=*/true,
                        tf_warning_or_error);

  result = NULL_TREE;

  /* For a TYPENAME_TYPE like "typename X::template Y<T>", we want to
     find a TEMPLATE_DECL.  Otherwise, we want to find a TYPE_DECL.  */
  tree fullname = TYPENAME_TYPE_FULLNAME (type);
  if (!decl)
    /*nop*/;
  else if (identifier_p (fullname)
           && TREE_CODE (decl) == TYPE_DECL)
    {
      result = TREE_TYPE (decl);
      if (result == error_mark_node)
        result = NULL_TREE;
    }
  else if (TREE_CODE (fullname) == TEMPLATE_ID_EXPR
           && DECL_CLASS_TEMPLATE_P (decl))
    {
      /* Obtain the template and the arguments.  */
      tree tmpl = TREE_OPERAND (fullname, 0);
      if (TREE_CODE (tmpl) == IDENTIFIER_NODE)
        {
          /* We get here with a plain identifier because a previous tentative
             parse of the nested-name-specifier as part of a ptr-operator saw
             ::template X<A>.  The use of ::template is necessary in a
             ptr-operator, but wrong in a declarator-id.

             [temp.names]: In a qualified-id of a declarator-id, the keyword
             template shall not appear at the top level.  */
          pedwarn (EXPR_LOC_OR_LOC (fullname, input_location), OPT_Wpedantic,
                   "keyword %<template%> not allowed in declarator-id");
          tmpl = decl;
        }
      tree args = TREE_OPERAND (fullname, 1);
      /* Instantiate the template.  */
      result = lookup_template_class (tmpl, args, NULL_TREE, NULL_TREE,
                                      /*entering_scope=*/true,
                                      tf_error | tf_user);
      if (result == error_mark_node)
        result = NULL_TREE;
    }

  /* Leave the SCOPE.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If we failed to resolve it, return the original typename.  */
  if (!result)
    return type;

  /* If lookup found a typename type, resolve that too.  */
  if (TREE_CODE (result) == TYPENAME_TYPE && !TYPENAME_IS_RESOLVING_P (result))
    {
      /* Ill-formed programs can cause infinite recursion here, so we
         must catch that.  */
      TYPENAME_IS_RESOLVING_P (result) = 1;
      result = resolve_typename_type (result, only_current_p);
      TYPENAME_IS_RESOLVING_P (result) = 0;
    }

  /* Qualify the resulting type.  */
  quals = cp_type_quals (type);
  if (quals)
    result = cp_build_qualified_type (result,
                                      cp_type_quals (result) | quals);

  return result;
}

/* EXPR is an expression which is not type-dependent.  Return a proxy
   for EXPR that can be used to compute the types of larger
   expressions containing EXPR.  */

tree
build_non_dependent_expr (tree expr)
{
  tree orig_expr = expr;
  tree inner_expr;

  /* When checking, try to get a constant value for all non-dependent
     expressions in order to expose bugs in *_dependent_expression_p
     and constexpr.  This can affect code generation, see PR70704, so
     only do this for -fchecking=2.  */
  if (flag_checking > 1
      && cxx_dialect >= cxx11
      /* Don't do this during nsdmi parsing as it can lead to
         unexpected recursive instantiations.  */
      && !parsing_nsdmi ()
      /* Don't do this during concept expansion either and for
         the same reason.  */
      && !expanding_concept ())
    fold_non_dependent_expr (expr);

  STRIP_ANY_LOCATION_WRAPPER (expr);

  /* Preserve OVERLOADs; the functions must be available to resolve
     types.  */
  inner_expr = expr;
  if (TREE_CODE (inner_expr) == STMT_EXPR)
    inner_expr = stmt_expr_value_expr (inner_expr);
  if (TREE_CODE (inner_expr) == ADDR_EXPR)
    inner_expr = TREE_OPERAND (inner_expr, 0);
  if (TREE_CODE (inner_expr) == COMPONENT_REF)
    inner_expr = TREE_OPERAND (inner_expr, 1);
  if (is_overloaded_fn (inner_expr)
      || TREE_CODE (inner_expr) == OFFSET_REF)
    return orig_expr;
  /* There is no need to return a proxy for a variable.  */
  if (VAR_P (expr))
    return orig_expr;
  /* Preserve string constants; conversions from string constants to
     "char *" are allowed, even though normally a "const char *"
     cannot be used to initialize a "char *".  */
  if (TREE_CODE (expr) == STRING_CST)
    return orig_expr;
  /* Preserve void and arithmetic constants, as an optimization -- there is no
     reason to create a new node.  */
  if (TREE_CODE (expr) == VOID_CST
      || TREE_CODE (expr) == INTEGER_CST
      || TREE_CODE (expr) == REAL_CST)
    return orig_expr;
  /* Preserve THROW_EXPRs -- all throw-expressions have type "void".
     There is at least one place where we want to know that a
     particular expression is a throw-expression: when checking a ?:
     expression, there are special rules if the second or third
     argument is a throw-expression.  */
  if (TREE_CODE (expr) == THROW_EXPR)
    return orig_expr;

  /* Don't wrap an initializer list, we need to be able to look inside.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (expr))
    return orig_expr;

  /* Don't wrap a dummy object, we need to be able to test for it.  */
  if (is_dummy_object (expr))
    return orig_expr;

  if (TREE_CODE (expr) == COND_EXPR)
    /* Recurse into each arm; a GNU ?: with an omitted middle operand
       reuses operand 0 as the value of the true arm.  */
    return build3 (COND_EXPR,
                   TREE_TYPE (expr),
                   TREE_OPERAND (expr, 0),
                   (TREE_OPERAND (expr, 1)
                    ? build_non_dependent_expr (TREE_OPERAND (expr, 1))
                    : build_non_dependent_expr (TREE_OPERAND (expr, 0))),
                   build_non_dependent_expr (TREE_OPERAND (expr, 2)));
  if (TREE_CODE (expr) == COMPOUND_EXPR
      && !COMPOUND_EXPR_OVERLOADED (expr))
    return build2 (COMPOUND_EXPR,
                   TREE_TYPE (expr),
                   TREE_OPERAND (expr, 0),
                   build_non_dependent_expr (TREE_OPERAND (expr, 1)));

  /* If the type is unknown, it can't really be non-dependent */
  gcc_assert (TREE_TYPE (expr) != unknown_type_node);

  /* Otherwise, build a NON_DEPENDENT_EXPR.  */
  return build1_loc (EXPR_LOCATION (orig_expr), NON_DEPENDENT_EXPR,
                     TREE_TYPE (expr), expr);
}

/* ARGS is a vector of expressions as arguments to a function call.
   Replace the arguments with equivalent non-dependent expressions.
   This modifies ARGS in place.
   */

void
make_args_non_dependent (vec<tree, va_gc> *args)
{
  unsigned int ix;
  tree arg;

  FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
    {
      tree newarg = build_non_dependent_expr (arg);
      if (newarg != arg)
        (*args)[ix] = newarg;
    }
}

/* Returns a type which represents 'auto' or 'decltype(auto)'.  We use a
   TEMPLATE_TYPE_PARM with a level one deeper than the actual template parms.
   If set_canonical is true, we set TYPE_CANONICAL on it.  */

static tree
make_auto_1 (tree name, bool set_canonical)
{
  tree au = cxx_make_type (TEMPLATE_TYPE_PARM);
  TYPE_NAME (au) = build_decl (input_location, TYPE_DECL, name, au);
  TYPE_STUB_DECL (au) = TYPE_NAME (au);
  /* Level is one deeper than the current template nesting so the
     placeholder never collides with a real template parameter.  */
  TEMPLATE_TYPE_PARM_INDEX (au) = build_template_parm_index
    (0, processing_template_decl + 1, processing_template_decl + 1,
     TYPE_NAME (au), NULL_TREE);
  if (set_canonical)
    TYPE_CANONICAL (au) = canonical_type_parameter (au);
  DECL_ARTIFICIAL (TYPE_NAME (au)) = 1;
  SET_DECL_TEMPLATE_PARM_P (TYPE_NAME (au));

  return au;
}

/* Returns the 'decltype(auto)' placeholder type.  */

tree
make_decltype_auto (void)
{
  return make_auto_1 (decltype_auto_identifier, true);
}

/* Returns the plain 'auto' placeholder type.  */

tree
make_auto (void)
{
  return make_auto_1 (auto_identifier, true);
}

/* Return a C++17 deduction placeholder for class template TMPL.  */

tree
make_template_placeholder (tree tmpl)
{
  tree t = make_auto_1 (DECL_NAME (tmpl), true);
  CLASS_PLACEHOLDER_TEMPLATE (t) = tmpl;
  return t;
}

/* True iff T is a C++17 class template deduction placeholder.  */

bool
template_placeholder_p (tree t)
{
  return is_auto (t) && CLASS_PLACEHOLDER_TEMPLATE (t);
}

/* Make a "constrained auto" type-specifier. This is an auto type with
   constraints that must be associated after deduction.  The constraint
   is formed from the given CONC and its optional sequence of arguments,
   which are non-null if written as partial-concept-id.  */

tree
make_constrained_auto (tree con, tree args)
{
  tree type = make_auto_1 (auto_identifier, false);

  /* Build the constraint.  NOTE(review): the VAR_P test presumably
     distinguishes variable concepts from function concepts — confirm
     against the concepts implementation.  */
  tree tmpl = DECL_TI_TEMPLATE (con);
  tree expr = VAR_P (con) ? tmpl : ovl_make (tmpl);
  expr = build_concept_check (expr, type, args);

  tree constr = normalize_expression (expr);
  PLACEHOLDER_TYPE_CONSTRAINTS (type) = constr;

  /* Our canonical type depends on the constraint.  */
  TYPE_CANONICAL (type) = canonical_type_parameter (type);

  /* Attach the constraint to the type declaration. */
  tree decl = TYPE_NAME (type);
  return decl;
}

/* Given type ARG, return std::initializer_list<ARG>.  */

static tree
listify (tree arg)
{
  tree std_init_list = get_namespace_binding (std_node, init_list_identifier);

  if (!std_init_list || !DECL_CLASS_TEMPLATE_P (std_init_list))
    {
      /* Suggest the missing header via a fix-it hint.  */
      gcc_rich_location richloc (input_location);
      maybe_add_include_fixit (&richloc, "<initializer_list>");
      error_at (&richloc,
                "deducing from brace-enclosed initializer list"
                " requires %<#include <initializer_list>%>");
      return error_mark_node;
    }
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = arg;

  return lookup_template_class (std_init_list, argvec, NULL_TREE,
                                NULL_TREE, 0, tf_warning_or_error);
}

/* Replace auto in TYPE with std::initializer_list<auto>.  */

static tree
listify_autos (tree type, tree auto_node)
{
  tree init_auto = listify (auto_node);
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = init_auto;
  if (processing_template_decl)
    argvec = add_to_template_args (current_template_args (), argvec);
  return tsubst (type, argvec, tf_warning_or_error, NULL_TREE);
}

/* Hash traits for hashing possibly constrained 'auto'
   TEMPLATE_TYPE_PARMs for use by do_auto_deduction.  */

struct auto_hash : default_hash_traits<tree>
{
  static inline hashval_t hash (tree);
  static inline bool equal (tree, tree);
};

/* Hash the 'auto' T.  */

inline hashval_t
auto_hash::hash (tree t)
{
  if (tree c = PLACEHOLDER_TYPE_CONSTRAINTS (t))
    /* Matching constrained-type-specifiers denote the same template
       parameter, so hash the constraint.  */
    return hash_placeholder_constraint (c);
  else
    /* But unconstrained autos are all separate, so just hash the
       pointer.  */
    return iterative_hash_object (t, 0);
}

/* Compare two 'auto's.  */

inline bool
auto_hash::equal (tree t1, tree t2)
{
  if (t1 == t2)
    return true;

  tree c1 = PLACEHOLDER_TYPE_CONSTRAINTS (t1);
  tree c2 = PLACEHOLDER_TYPE_CONSTRAINTS (t2);

  /* Two unconstrained autos are distinct.  */
  if (!c1 || !c2)
    return false;

  return equivalent_placeholder_constraints (c1, c2);
}

/* for_each_template_parm callback for extract_autos: if t is a (possibly
   constrained) auto, add it to the vector.  */

static int
extract_autos_r (tree t, void *data)
{
  hash_table<auto_hash> &hash = *(hash_table<auto_hash>*)data;
  if (is_auto (t))
    {
      /* All the autos were built with index 0; fix that up now.  */
      tree *p = hash.find_slot (t, INSERT);
      unsigned idx;
      if (*p)
        /* If this is a repeated constrained-type-specifier, use the index
           we chose before.  */
        idx = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (*p));
      else
        {
          /* Otherwise this is new, so use the current count.  */
          *p = t;
          idx = hash.elements () - 1;
        }
      TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (t)) = idx;
    }

  /* Always keep walking.  */
  return 0;
}

/* Return a TREE_VEC of the 'auto's used in type under the Concepts TS, which
   says they can appear anywhere in the type.  */

static tree
extract_autos (tree type)
{
  hash_set<tree> visited;
  hash_table<auto_hash> hash (2);

  for_each_template_parm (type, extract_autos_r, &hash, &visited, true);

  /* Build the result vector, placing each auto at the index chosen
     for it by extract_autos_r.  */
  tree tree_vec = make_tree_vec (hash.elements());
  for (hash_table<auto_hash>::iterator iter = hash.begin();
       iter != hash.end(); ++iter)
    {
      tree elt = *iter;
      unsigned i = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (elt));
      TREE_VEC_ELT (tree_vec, i)
        = build_tree_list (NULL_TREE, TYPE_NAME (elt));
    }

  return tree_vec;
}

/* The stem for deduction guide names.  */
const char *const dguide_base = "__dguide_";

/* Return the name for a deduction guide for class template TMPL.  */

tree
dguide_name (tree tmpl)
{
  tree type = (TYPE_P (tmpl) ? tmpl : TREE_TYPE (tmpl));
  tree tname = TYPE_IDENTIFIER (type);
  /* Concatenate the stem and the class name; the source string's
     terminating NUL is copied by the second memcpy.  */
  char *buf = (char *) alloca (1 + strlen (dguide_base)
                               + IDENTIFIER_LENGTH (tname));
  memcpy (buf, dguide_base, strlen (dguide_base));
  memcpy (buf + strlen (dguide_base), IDENTIFIER_POINTER (tname),
          IDENTIFIER_LENGTH (tname) + 1);
  tree dname = get_identifier (buf);
  TREE_TYPE (dname) = type;
  return dname;
}

/* True if NAME is the name of a deduction guide.  */

bool
dguide_name_p (tree name)
{
  return (TREE_CODE (name) == IDENTIFIER_NODE
          && TREE_TYPE (name)
          && !strncmp (IDENTIFIER_POINTER (name), dguide_base,
                       strlen (dguide_base)));
}

/* True if FN is a deduction guide.  */

bool
deduction_guide_p (const_tree fn)
{
  if (DECL_P (fn))
    if (tree name = DECL_NAME (fn))
      return dguide_name_p (name);
  return false;
}

/* True if FN is the copy deduction guide, i.e. A(A)->A.  */

bool
copy_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  if (!DECL_ARTIFICIAL (fn))
    return false;
  tree parms = FUNCTION_FIRST_USER_PARMTYPE (DECL_TI_TEMPLATE (fn));
  return (TREE_CHAIN (parms) == void_list_node
          && same_type_p (TREE_VALUE (parms), TREE_TYPE (DECL_NAME (fn))));
}

/* True if FN is a guide generated from a constructor template.  */

bool
template_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  if (!DECL_ARTIFICIAL (fn))
    return false;
  tree tmpl = DECL_TI_TEMPLATE (fn);
  if (tree org = DECL_ABSTRACT_ORIGIN (tmpl))
    return PRIMARY_TEMPLATE_P (org);
  return false;
}

/* OLDDECL is a _DECL for a template parameter.  Return a similar
   parameter at LEVEL:INDEX, using tsubst_args and complain for
   substitution into non-type template parameter types.  Note that the
   handling of template template parameters relies on
   current_template_parms being set appropriately for the new template.
   */

static tree
rewrite_template_parm (tree olddecl, unsigned index, unsigned level,
                       tree tsubst_args, tsubst_flags_t complain)
{
  if (olddecl == error_mark_node)
    return error_mark_node;

  tree oldidx = get_template_parm_index (olddecl);

  /* Build the type of the new parameter: a fresh parm type for type and
     template parameters, a substituted type for non-type parameters.  */
  tree newtype;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      tree oldtype = TREE_TYPE (olddecl);
      newtype = cxx_make_type (TREE_CODE (oldtype));
      TYPE_MAIN_VARIANT (newtype) = newtype;
      if (TREE_CODE (oldtype) == TEMPLATE_TYPE_PARM)
        TEMPLATE_TYPE_PARM_FOR_CLASS (newtype)
          = TEMPLATE_TYPE_PARM_FOR_CLASS (oldtype);
    }
  else
    {
      newtype = TREE_TYPE (olddecl);
      if (type_uses_auto (newtype))
        {
          // Substitute once to fix references to other template parameters.
          newtype = tsubst (newtype, tsubst_args,
                            complain|tf_partial, NULL_TREE);
          // Now substitute again to reduce the level of the auto.
          newtype = tsubst (newtype, current_template_args (),
                            complain, NULL_TREE);
        }
      else
        newtype = tsubst (newtype, tsubst_args,
                          complain, NULL_TREE);
    }

  tree newdecl
    = build_decl (DECL_SOURCE_LOCATION (olddecl), TREE_CODE (olddecl),
                  DECL_NAME (olddecl), newtype);
  SET_DECL_TEMPLATE_PARM_P (newdecl);

  tree newidx;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      /* Type or template template parameter: wire up the new index and
         canonical type.  */
      newidx = TEMPLATE_TYPE_PARM_INDEX (newtype)
        = build_template_parm_index (index, level, level,
                                     newdecl, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
        = TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      TYPE_STUB_DECL (newtype) = TYPE_NAME (newtype) = newdecl;
      TYPE_CANONICAL (newtype) = canonical_type_parameter (newtype);

      if (TREE_CODE (olddecl) == TEMPLATE_DECL)
        {
          DECL_TEMPLATE_RESULT (newdecl)
            = build_decl (DECL_SOURCE_LOCATION (olddecl), TYPE_DECL,
                          DECL_NAME (olddecl), newtype);
          DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (newdecl)) = true;
          // First create a copy (ttargs) of tsubst_args with an
          // additional level for the template template parameter's own
          // template parameters (ttparms).
          tree ttparms = (INNERMOST_TEMPLATE_PARMS
                          (DECL_TEMPLATE_PARMS (olddecl)));
          const int depth = TMPL_ARGS_DEPTH (tsubst_args);
          tree ttargs = make_tree_vec (depth + 1);
          for (int i = 0; i < depth; ++i)
            TREE_VEC_ELT (ttargs, i) = TREE_VEC_ELT (tsubst_args, i);
          TREE_VEC_ELT (ttargs, depth)
            = template_parms_level_to_args (ttparms);
          // Substitute ttargs into ttparms to fix references to
          // other template parameters.
          ttparms = tsubst_template_parms_level (ttparms, ttargs,
                                                 complain|tf_partial);
          // Now substitute again with args based on tparms, to reduce
          // the level of the ttparms.
          ttargs = current_template_args ();
          ttparms = tsubst_template_parms_level (ttparms, ttargs,
                                                 complain);
          // Finally, tack the adjusted parms onto tparms.
          ttparms = tree_cons (size_int (depth), ttparms,
                               current_template_parms);
          DECL_TEMPLATE_PARMS (newdecl) = ttparms;
        }
    }
  else
    {
      /* Non-type parameter: build the CONST_DECL that represents its
         value and record the index as its initial value.  */
      tree oldconst = TEMPLATE_PARM_DECL (oldidx);
      tree newconst
        = build_decl (DECL_SOURCE_LOCATION (oldconst),
                      TREE_CODE (oldconst),
                      DECL_NAME (oldconst), newtype);
      TREE_CONSTANT (newconst) = TREE_CONSTANT (newdecl)
        = TREE_READONLY (newconst) = TREE_READONLY (newdecl) = true;
      SET_DECL_TEMPLATE_PARM_P (newconst);
      newidx = build_template_parm_index (index, level, level,
                                          newconst, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
        = TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      DECL_INITIAL (newdecl) = DECL_INITIAL (newconst) = newidx;
    }

  return newdecl;
}

/* Returns a C++17 class deduction guide template based on the constructor
   CTOR.  As a special case, CTOR can be a RECORD_TYPE for an implicit default
   guide, or REFERENCE_TYPE for an implicit copy/move guide.
   */

static tree
build_deduction_guide (tree ctor, tree outer_args, tsubst_flags_t complain)
{
  tree type, tparms, targs, fparms, fargs, ci;
  bool memtmpl = false;
  bool explicit_p;
  location_t loc;
  tree fn_tmpl = NULL_TREE;

  if (TYPE_P (ctor))
    {
      /* Implicit guide: RECORD_TYPE -> default guide,
         REFERENCE_TYPE -> copy/move guide.  */
      type = ctor;
      bool copy_p = TREE_CODE (type) == REFERENCE_TYPE;
      if (copy_p)
        {
          type = TREE_TYPE (type);
          fparms = tree_cons (NULL_TREE, type, void_list_node);
        }
      else
        fparms = void_list_node;

      tree ctmpl = CLASSTYPE_TI_TEMPLATE (type);
      tparms = DECL_TEMPLATE_PARMS (ctmpl);
      targs = CLASSTYPE_TI_ARGS (type);
      ci = NULL_TREE;
      fargs = NULL_TREE;
      loc = DECL_SOURCE_LOCATION (ctmpl);
      explicit_p = false;
    }
  else
    {
      /* Guide derived from an actual constructor (template).  */
      ++processing_template_decl;
      bool ok = true;

      fn_tmpl
        = (TREE_CODE (ctor) == TEMPLATE_DECL ? ctor
           : DECL_TI_TEMPLATE (ctor));
      if (outer_args)
        fn_tmpl = tsubst (fn_tmpl, outer_args, complain, ctor);
      ctor = DECL_TEMPLATE_RESULT (fn_tmpl);

      type = DECL_CONTEXT (ctor);

      tparms = DECL_TEMPLATE_PARMS (fn_tmpl);
      /* If type is a member class template, DECL_TI_ARGS (ctor) will have
         fully specialized args for the enclosing class.  Strip those off, as
         the deduction guide won't have those template parameters.  */
      targs = get_innermost_template_args (DECL_TI_ARGS (ctor),
                                           TMPL_PARMS_DEPTH (tparms));
      /* Discard the 'this' parameter.  */
      fparms = FUNCTION_ARG_CHAIN (ctor);
      fargs = TREE_CHAIN (DECL_ARGUMENTS (ctor));
      ci = get_constraints (ctor);
      loc = DECL_SOURCE_LOCATION (ctor);
      explicit_p = DECL_NONCONVERTING_P (ctor);

      if (PRIMARY_TEMPLATE_P (fn_tmpl))
        {
          memtmpl = true;

          /* For a member template constructor, we need to flatten the two
             template parameter lists into one, and then adjust the function
             signature accordingly.  This gets...complicated.  */
          tree save_parms = current_template_parms;

          /* For a member template we should have two levels of parms/args,
             one for the class and one for the constructor.  We stripped
             specialized args for further enclosing classes above.  */
          const int depth = 2;
          gcc_assert (TMPL_ARGS_DEPTH (targs) == depth);

          /* Template args for translating references to the two-level
             template parameters into references to the one-level template
             parameters we are creating.  */
          tree tsubst_args = copy_node (targs);
          TMPL_ARGS_LEVEL (tsubst_args, depth)
            = copy_node (TMPL_ARGS_LEVEL (tsubst_args, depth));

          /* Template parms for the constructor template.  */
          tree ftparms = TREE_VALUE (tparms);
          unsigned flen = TREE_VEC_LENGTH (ftparms);
          /* Template parms for the class template.  */
          tparms = TREE_CHAIN (tparms);
          tree ctparms = TREE_VALUE (tparms);
          unsigned clen = TREE_VEC_LENGTH (ctparms);

          /* Template parms for the deduction guide start as a copy of the
             template parms for the class.  We set current_template_parms for
             lookup_template_class_1.  */
          current_template_parms = tparms = copy_node (tparms);
          tree new_vec = TREE_VALUE (tparms) = make_tree_vec (flen + clen);
          for (unsigned i = 0; i < clen; ++i)
            TREE_VEC_ELT (new_vec, i) = TREE_VEC_ELT (ctparms, i);

          /* Now we need to rewrite the constructor parms to append them to
             the class parms.  */
          for (unsigned i = 0; i < flen; ++i)
            {
              unsigned index = i + clen;
              unsigned level = 1;
              tree oldelt = TREE_VEC_ELT (ftparms, i);
              tree olddecl = TREE_VALUE (oldelt);
              tree newdecl = rewrite_template_parm (olddecl, index, level,
                                                    tsubst_args, complain);
              if (newdecl == error_mark_node)
                ok = false;
              tree newdef = tsubst_template_arg (TREE_PURPOSE (oldelt),
                                                 tsubst_args, complain, ctor);
              tree list = build_tree_list (newdef, newdecl);
              TEMPLATE_PARM_CONSTRAINTS (list)
                = tsubst_constraint_info (TEMPLATE_PARM_CONSTRAINTS (oldelt),
                                          tsubst_args, complain, ctor);
              TREE_VEC_ELT (new_vec, index) = list;
              TMPL_ARG (tsubst_args, depth, i) = template_parm_to_arg (list);
            }

          /* Now we have a final set of template parms to substitute into the
             function signature.  */
          targs = template_parms_to_args (tparms);
          fparms = tsubst_arg_types (fparms, tsubst_args, NULL_TREE,
                                     complain, ctor);
          fargs = tsubst (fargs, tsubst_args, complain, ctor);
          if (ci)
            ci = tsubst_constraint_info (ci, tsubst_args, complain, ctor);

          current_template_parms = save_parms;
        }
      --processing_template_decl;
      if (!ok)
        return error_mark_node;
    }

  if (!memtmpl)
    {
      /* Copy the parms so we can set DECL_PRIMARY_TEMPLATE.  */
      tparms = copy_node (tparms);
      INNERMOST_TEMPLATE_PARMS (tparms)
        = copy_node (INNERMOST_TEMPLATE_PARMS (tparms));
    }

  /* Build the guide function and its enclosing template decl.  */
  tree fntype = build_function_type (type, fparms);
  tree ded_fn = build_lang_decl_loc (loc,
                                     FUNCTION_DECL,
                                     dguide_name (type), fntype);
  DECL_ARGUMENTS (ded_fn) = fargs;
  DECL_ARTIFICIAL (ded_fn) = true;
  DECL_NONCONVERTING_P (ded_fn) = explicit_p;
  tree ded_tmpl = build_template_decl (ded_fn, tparms, /*member*/false);
  DECL_ARTIFICIAL (ded_tmpl) = true;
  DECL_TEMPLATE_RESULT (ded_tmpl) = ded_fn;
  TREE_TYPE (ded_tmpl) = TREE_TYPE (ded_fn);
  DECL_TEMPLATE_INFO (ded_fn) = build_template_info (ded_tmpl, targs);
  DECL_PRIMARY_TEMPLATE (ded_tmpl) = ded_tmpl;
  if (DECL_P (ctor))
    DECL_ABSTRACT_ORIGIN (ded_tmpl) = fn_tmpl;
  if (ci)
    set_constraints (ded_tmpl, ci);

  return ded_tmpl;
}

/* Deduce template arguments for the class template placeholder PTYPE for
   template TMPL based on the initializer INIT, and return the resulting
   type.  */

static tree
do_class_deduction (tree ptype, tree tmpl, tree init, int flags,
                    tsubst_flags_t complain)
{
  if (!DECL_CLASS_TEMPLATE_P (tmpl))
    {
      /* We should have handled this in the caller.  */
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
        return ptype;
      if (complain & tf_error)
        error ("non-class template %qT used without template arguments", tmpl);
      return error_mark_node;
    }

  tree type = TREE_TYPE (tmpl);

  bool try_list_ctor = false;

  /* Convert the initializer into a flat argument vector.  */
  vec<tree,va_gc> *args;
  if (init == NULL_TREE
      || TREE_CODE (init) == TREE_LIST)
    args = make_tree_vector_from_list (init);
  else if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      try_list_ctor = TYPE_HAS_LIST_CTOR (type);
      if (try_list_ctor && CONSTRUCTOR_NELTS (init) == 1)
        {
          /* As an exception, the first phase in 16.3.1.7 (considering the
             initializer list as a single argument) is omitted if the
             initializer list consists of a single expression of type cv U,
             where U is a specialization of C or a class derived from a
             specialization of C.  */
          tree elt = CONSTRUCTOR_ELT (init, 0)->value;
          tree etype = TREE_TYPE (elt);

          tree tparms = (INNERMOST_TEMPLATE_PARMS
                         (DECL_TEMPLATE_PARMS (tmpl)));
          tree targs = make_tree_vec (TREE_VEC_LENGTH (tparms));
          int err = unify (tparms, targs, type, etype,
                           UNIFY_ALLOW_DERIVED, /*explain*/false);
          if (err == 0)
            try_list_ctor = false;
          ggc_free (targs);
        }
      if (try_list_ctor || is_std_init_list (type))
        args = make_tree_vector_single (init);
      else
        args = make_tree_vector_from_ctor (init);
    }
  else
    args = make_tree_vector_single (init);

  /* Collect user-declared deduction guides.  */
  tree dname = dguide_name (tmpl);
  tree cands = lookup_qualified_name (CP_DECL_CONTEXT (tmpl), dname,
                                      /*type*/false, /*complain*/false,
                                      /*hidden*/false);
  bool elided = false;
  if (cands == error_mark_node)
    cands = NULL_TREE;

  /* Prune explicit deduction guides in copy-initialization context.  */
  if (flags & LOOKUP_ONLYCONVERTING)
    {
      for (lkp_iterator iter (cands); !elided && iter; ++iter)
        if (DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter)))
          elided = true;

      if (elided)
        {
          /* Found a nonconverting guide, prune the candidates.  */
          tree pruned = NULL_TREE;
          for (lkp_iterator iter (cands); iter; ++iter)
            if (!DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter)))
              pruned = lookup_add (*iter, pruned);

          cands = pruned;
        }
    }

  tree outer_args = NULL_TREE;
  if (DECL_CLASS_SCOPE_P (tmpl)
      && CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (tmpl)))
    {
      outer_args = CLASSTYPE_TI_ARGS (DECL_CONTEXT (tmpl));
      type = TREE_TYPE (most_general_template (tmpl));
    }

  /* Add an implicit guide for each non-inherited constructor.  */
  bool saw_ctor = false;
  // FIXME cache artificial deduction guides
  for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (type)); iter; ++iter)
    {
      /* Skip inherited constructors.  */
      if (iter.using_p ())
        continue;

      tree guide = build_deduction_guide (*iter, outer_args, complain);
      if (guide == error_mark_node)
        return error_mark_node;
      if ((flags & LOOKUP_ONLYCONVERTING)
          && DECL_NONCONVERTING_P (STRIP_TEMPLATE (guide)))
        elided = true;
      else
        cands = lookup_add (guide, cands);

      saw_ctor = true;
    }

  tree call = error_mark_node;

  /* If this is list-initialization and the class has a list constructor,
     first try deducing from the list as a single argument, as
     [over.match.list].  */
  tree list_cands = NULL_TREE;
  if (try_list_ctor && cands)
    for (lkp_iterator iter (cands); iter; ++iter)
      {
        tree dg = *iter;
        if (is_list_ctor (dg))
          list_cands = lookup_add (dg, list_cands);
      }
  if (list_cands)
    {
      ++cp_unevaluated_operand;
      call = build_new_function_call (list_cands, &args, tf_decltype);
      --cp_unevaluated_operand;

      if (call == error_mark_node)
        {
          /* That didn't work, now try treating the list as a sequence of
             arguments.  */
          release_tree_vector (args);
          args = make_tree_vector_from_ctor (init);
        }
    }

  /* Maybe generate an implicit deduction guide.  */
  if (call == error_mark_node && args->length () < 2)
    {
      tree gtype = NULL_TREE;

      if (args->length () == 1)
        /* Generate a copy guide.  */
        gtype = build_reference_type (type);
      else if (!saw_ctor)
        /* Generate a default guide.  */
        gtype = type;

      if (gtype)
        {
          tree guide = build_deduction_guide (gtype, outer_args, complain);
          if (guide == error_mark_node)
            return error_mark_node;
          cands = lookup_add (guide, cands);
        }
    }

  if (elided && !cands)
    {
      error ("cannot deduce template arguments for copy-initialization"
             " of %qT, as it has no non-explicit deduction guides or "
             "user-declared constructors", type);
      return error_mark_node;
    }
  else if (!cands && call == error_mark_node)
    {
      error ("cannot deduce template arguments of %qT, as it has no viable "
             "deduction guides", type);
      return error_mark_node;
    }

  /* Perform overload resolution among the guides, in an unevaluated
     context (the guide is never actually called).  */
  if (call == error_mark_node)
    {
      ++cp_unevaluated_operand;
      call = build_new_function_call (cands, &args, tf_decltype);
      --cp_unevaluated_operand;
    }

  if (call == error_mark_node && (complain & tf_warning_or_error))
    {
      error ("class template argument deduction failed:");

      /* Redo resolution with COMPLAIN to explain the failure.  */
      ++cp_unevaluated_operand;
      call = build_new_function_call (cands, &args, complain | tf_decltype);
      --cp_unevaluated_operand;

      if (elided)
        inform (input_location, "explicit deduction guides not considered "
                "for copy-initialization");
    }

  release_tree_vector (args);

  /* The deduced type is the return type of the chosen guide, carrying
     over PTYPE's cv-qualifiers.  */
  return cp_build_qualified_type (TREE_TYPE (call), cp_type_quals (ptype));
}

/* Replace occurrences of 'auto' in TYPE with the appropriate type deduced
   from INIT.  AUTO_NODE is the TEMPLATE_TYPE_PARM used for 'auto' in TYPE.
   The CONTEXT determines the context in which auto deduction is performed
   and is used to control error diagnostics.  FLAGS are the LOOKUP_* flags.
   OUTER_TARGS are used during template argument deduction (context ==
   adc_unify) to properly substitute the result, and is ignored in other
   contexts.

   For partial-concept-ids, extra args may be appended to the list of deduced
   template arguments prior to determining constraint satisfaction.
*/ tree do_auto_deduction (tree type, tree init, tree auto_node, tsubst_flags_t complain, auto_deduction_context context, tree outer_targs, int flags) { tree targs; if (init == error_mark_node) return error_mark_node; if (init && type_dependent_expression_p (init) && context != adc_unify) /* Defining a subset of type-dependent expressions that we can deduce from ahead of time isn't worth the trouble. */ return type; /* Similarly, we can't deduce from another undeduced decl. */ if (init && undeduced_auto_decl (init)) return type; if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node)) /* C++17 class template argument deduction. */ return do_class_deduction (type, tmpl, init, flags, complain); if (init == NULL_TREE || TREE_TYPE (init) == NULL_TREE) /* Nothing we can do with this, even in deduction context. */ return type; /* [dcl.spec.auto]: Obtain P from T by replacing the occurrences of auto with either a new invented type template parameter U or, if the initializer is a braced-init-list (8.5.4), with std::initializer_list<U>. */ if (BRACE_ENCLOSED_INITIALIZER_P (init)) { if (!DIRECT_LIST_INIT_P (init)) type = listify_autos (type, auto_node); else if (CONSTRUCTOR_NELTS (init) == 1) init = CONSTRUCTOR_ELT (init, 0)->value; else { if (complain & tf_warning_or_error) { if (permerror (input_location, "direct-list-initialization of " "%<auto%> requires exactly one element")) inform (input_location, "for deduction to %<std::initializer_list%>, use copy-" "list-initialization (i.e. add %<=%> before the %<{%>)"); } type = listify_autos (type, auto_node); } } if (type == error_mark_node) return error_mark_node; init = resolve_nondeduced_context (init, complain); if (context == adc_decomp_type && auto_node == type && init != error_mark_node && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE) /* [dcl.decomp]/1 - if decomposition declaration has no ref-qualifiers and initializer has array type, deduce cv-qualified array type. 
*/ return cp_build_qualified_type_real (TREE_TYPE (init), TYPE_QUALS (type), complain); else if (AUTO_IS_DECLTYPE (auto_node)) { bool id = (DECL_P (init) || ((TREE_CODE (init) == COMPONENT_REF || TREE_CODE (init) == SCOPE_REF) && !REF_PARENTHESIZED_P (init))); targs = make_tree_vec (1); TREE_VEC_ELT (targs, 0) = finish_decltype_type (init, id, tf_warning_or_error); if (type != auto_node) { if (complain & tf_error) error ("%qT as type rather than plain %<decltype(auto)%>", type); return error_mark_node; } } else { tree parms = build_tree_list (NULL_TREE, type); tree tparms; if (flag_concepts) tparms = extract_autos (type); else { tparms = make_tree_vec (1); TREE_VEC_ELT (tparms, 0) = build_tree_list (NULL_TREE, TYPE_NAME (auto_node)); } targs = make_tree_vec (TREE_VEC_LENGTH (tparms)); int val = type_unification_real (tparms, targs, parms, &init, 1, 0, DEDUCE_CALL, LOOKUP_NORMAL, NULL, /*explain_p=*/false); if (val > 0) { if (processing_template_decl) /* Try again at instantiation time. */ return type; if (type && type != error_mark_node && (complain & tf_error)) /* If type is error_mark_node a diagnostic must have been emitted by now. Also, having a mention to '<type error>' in the diagnostic is not really useful to the user. */ { if (cfun && auto_node == current_function_auto_return_pattern && LAMBDA_FUNCTION_P (current_function_decl)) error ("unable to deduce lambda return type from %qE", init); else error ("unable to deduce %qT from %qE", type, init); type_unification_real (tparms, targs, parms, &init, 1, 0, DEDUCE_CALL, LOOKUP_NORMAL, NULL, /*explain_p=*/true); } return error_mark_node; } } /* Check any placeholder constraints against the deduced type. */ if (flag_concepts && !processing_template_decl) if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (auto_node)) { /* Use the deduced type to check the associated constraints. If we have a partial-concept-id, rebuild the argument list so that we check using the extra arguments. 
*/ gcc_assert (TREE_CODE (constr) == CHECK_CONSTR); tree cargs = CHECK_CONSTR_ARGS (constr); if (TREE_VEC_LENGTH (cargs) > 1) { cargs = copy_node (cargs); TREE_VEC_ELT (cargs, 0) = TREE_VEC_ELT (targs, 0); } else cargs = targs; if (!constraints_satisfied_p (constr, cargs)) { if (complain & tf_warning_or_error) { switch (context) { case adc_unspecified: case adc_unify: error("placeholder constraints not satisfied"); break; case adc_variable_type: case adc_decomp_type: error ("deduced initializer does not satisfy " "placeholder constraints"); break; case adc_return_type: error ("deduced return type does not satisfy " "placeholder constraints"); break; case adc_requirement: error ("deduced expression type does not satisfy " "placeholder constraints"); break; } diagnose_constraints (input_location, constr, targs); } return error_mark_node; } } if (processing_template_decl && context != adc_unify) outer_targs = current_template_args (); targs = add_to_template_args (outer_targs, targs); return tsubst (type, targs, complain, NULL_TREE); } /* Substitutes LATE_RETURN_TYPE for 'auto' in TYPE and returns the result. */ tree splice_late_return_type (tree type, tree late_return_type) { if (is_auto (type)) { if (late_return_type) return late_return_type; tree idx = get_template_parm_index (type); if (TEMPLATE_PARM_LEVEL (idx) <= processing_template_decl) /* In an abbreviated function template we didn't know we were dealing with a function template when we saw the auto return type, so update it to have the correct level. */ return make_auto_1 (TYPE_IDENTIFIER (type), true); } return type; } /* Returns true iff TYPE is a TEMPLATE_TYPE_PARM representing 'auto' or 'decltype(auto)' or a deduced class template. 
*/ bool is_auto (const_tree type) { if (TREE_CODE (type) == TEMPLATE_TYPE_PARM && (TYPE_IDENTIFIER (type) == auto_identifier || TYPE_IDENTIFIER (type) == decltype_auto_identifier || CLASS_PLACEHOLDER_TEMPLATE (type))) return true; else return false; } /* for_each_template_parm callback for type_uses_auto. */ int is_auto_r (tree tp, void */*data*/) { return is_auto (tp); } /* Returns the TEMPLATE_TYPE_PARM in TYPE representing `auto' iff TYPE contains a use of `auto'. Returns NULL_TREE otherwise. */ tree type_uses_auto (tree type) { if (type == NULL_TREE) return NULL_TREE; else if (flag_concepts) { /* The Concepts TS allows multiple autos in one type-specifier; just return the first one we find, do_auto_deduction will collect all of them. */ if (uses_template_parms (type)) return for_each_template_parm (type, is_auto_r, /*data*/NULL, /*visited*/NULL, /*nondeduced*/true); else return NULL_TREE; } else return find_type_usage (type, is_auto); } /* Report ill-formed occurrences of auto types in ARGUMENTS. If concepts are enabled, auto is acceptable in template arguments, but only when TEMPL identifies a template class. Return TRUE if any such errors were reported. */ bool check_auto_in_tmpl_args (tree tmpl, tree args) { /* If there were previous errors, nevermind. */ if (!args || TREE_CODE (args) != TREE_VEC) return false; /* If TMPL is an identifier, we're parsing and we can't tell yet whether TMPL is supposed to be a type, a function or a variable. We'll only be able to tell during template substitution, so we expect to be called again then. If concepts are enabled and we know we have a type, we're ok. */ if (flag_concepts && (identifier_p (tmpl) || (DECL_P (tmpl) && (DECL_TYPE_TEMPLATE_P (tmpl) || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))))) return false; /* Quickly search for any occurrences of auto; usually there won't be any, and then we'll avoid allocating the vector. 
*/ if (!type_uses_auto (args)) return false; bool errors = false; tree vec = extract_autos (args); for (int i = 0; i < TREE_VEC_LENGTH (vec); i++) { tree xauto = TREE_VALUE (TREE_VEC_ELT (vec, i)); error_at (DECL_SOURCE_LOCATION (xauto), "invalid use of %qT in template argument", xauto); errors = true; } return errors; } /* For a given template T, return the vector of typedefs referenced in T for which access check is needed at T instantiation time. T is either a FUNCTION_DECL or a RECORD_TYPE. Those typedefs were added to T by the function append_type_to_template_for_access_check. */ vec<qualified_typedef_usage_t, va_gc> * get_types_needing_access_check (tree t) { tree ti; vec<qualified_typedef_usage_t, va_gc> *result = NULL; if (!t || t == error_mark_node) return NULL; if (!(ti = get_template_info (t))) return NULL; if (CLASS_TYPE_P (t) || TREE_CODE (t) == FUNCTION_DECL) { if (!TI_TEMPLATE (ti)) return NULL; result = TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti); } return result; } /* Append the typedef TYPE_DECL used in template T to a list of typedefs tied to T. That list of typedefs will be access checked at T instantiation time. T is either a FUNCTION_DECL or a RECORD_TYPE. TYPE_DECL is a TYPE_DECL node representing a typedef. SCOPE is the scope through which TYPE_DECL is accessed. LOCATION is the location of the usage point of TYPE_DECL. This function is a subroutine of append_type_to_template_for_access_check. 
*/ static void append_type_to_template_for_access_check_1 (tree t, tree type_decl, tree scope, location_t location) { qualified_typedef_usage_t typedef_usage; tree ti; if (!t || t == error_mark_node) return; gcc_assert ((TREE_CODE (t) == FUNCTION_DECL || CLASS_TYPE_P (t)) && type_decl && TREE_CODE (type_decl) == TYPE_DECL && scope); if (!(ti = get_template_info (t))) return; gcc_assert (TI_TEMPLATE (ti)); typedef_usage.typedef_decl = type_decl; typedef_usage.context = scope; typedef_usage.locus = location; vec_safe_push (TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti), typedef_usage); } /* Append TYPE_DECL to the template TEMPL. TEMPL is either a class type, a FUNCTION_DECL or a a TEMPLATE_DECL. At TEMPL instanciation time, TYPE_DECL will be checked to see if it can be accessed through SCOPE. LOCATION is the location of the usage point of TYPE_DECL. e.g. consider the following code snippet: class C { typedef int myint; }; template<class U> struct S { C::myint mi; // <-- usage point of the typedef C::myint }; S<char> s; At S<char> instantiation time, we need to check the access of C::myint In other words, we need to check the access of the myint typedef through the C scope. For that purpose, this function will add the myint typedef and the scope C through which its being accessed to a list of typedefs tied to the template S. That list will be walked at template instantiation time and access check performed on each typedefs it contains. Note that this particular code snippet should yield an error because myint is private to C. */ void append_type_to_template_for_access_check (tree templ, tree type_decl, tree scope, location_t location) { qualified_typedef_usage_t *iter; unsigned i; gcc_assert (type_decl && (TREE_CODE (type_decl) == TYPE_DECL)); /* Make sure we don't append the type to the template twice. 
*/ FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (templ), i, iter) if (iter->typedef_decl == type_decl && scope == iter->context) return; append_type_to_template_for_access_check_1 (templ, type_decl, scope, location); } /* Convert the generic type parameters in PARM that match the types given in the range [START_IDX, END_IDX) from the current_template_parms into generic type packs. */ tree convert_generic_types_to_packs (tree parm, int start_idx, int end_idx) { tree current = current_template_parms; int depth = TMPL_PARMS_DEPTH (current); current = INNERMOST_TEMPLATE_PARMS (current); tree replacement = make_tree_vec (TREE_VEC_LENGTH (current)); for (int i = 0; i < start_idx; ++i) TREE_VEC_ELT (replacement, i) = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i))); for (int i = start_idx; i < end_idx; ++i) { /* Create a distinct parameter pack type from the current parm and add it to the replacement args to tsubst below into the generic function parameter. */ tree o = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i))); tree t = copy_type (o); TEMPLATE_TYPE_PARM_INDEX (t) = reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (o), o, 0, 0, tf_none); TREE_TYPE (TEMPLATE_TYPE_DECL (t)) = t; TYPE_STUB_DECL (t) = TYPE_NAME (t) = TEMPLATE_TYPE_DECL (t); TYPE_MAIN_VARIANT (t) = t; TEMPLATE_TYPE_PARAMETER_PACK (t) = true; TYPE_CANONICAL (t) = canonical_type_parameter (t); TREE_VEC_ELT (replacement, i) = t; TREE_VALUE (TREE_VEC_ELT (current, i)) = TREE_CHAIN (t); } for (int i = end_idx, e = TREE_VEC_LENGTH (current); i < e; ++i) TREE_VEC_ELT (replacement, i) = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i))); /* If there are more levels then build up the replacement with the outer template parms. */ if (depth > 1) replacement = add_to_template_args (template_parms_to_args (TREE_CHAIN (current_template_parms)), replacement); return tsubst (parm, replacement, tf_none, NULL_TREE); } /* Entries in the decl_constraint hash table. 
*/

/* A single association from a declaration to its constraint-info.  */

struct GTY((for_user)) constr_entry
{
  tree decl;  /* The (non-template) declaration.  */
  tree ci;    /* Its CONSTRAINT_INFO, or NULL_TREE.  */
};

/* Hashing function and equality for constraint entries.  Entries are
   keyed purely on the declaration; the DECL_UID gives a stable hash.  */

struct constr_hasher : ggc_ptr_hash<constr_entry>
{
  static hashval_t hash (constr_entry *e)
  {
    return (hashval_t)DECL_UID (e->decl);
  }

  static bool equal (constr_entry *e1, constr_entry *e2)
  {
    return e1->decl == e2->decl;
  }
};

/* A mapping from declarations to constraint information.  Note that
   both templates and their underlying declarations are mapped to the
   same constraint information.

   FIXME: This is defined in pt.c because garbage collection
   code is not being generated for constraint.cc.  */

static GTY (()) hash_table<constr_hasher> *decl_constraints;

/* Returns the template constraints of declaration T.  If T is not
   constrained, return NULL_TREE.  Note that T must be non-null.  */

tree
get_constraints (tree t)
{
  /* Without concepts no declaration carries constraints.  */
  if (!flag_concepts)
    return NULL_TREE;

  gcc_assert (DECL_P (t));
  /* Templates and their results share one entry; look up via the
     underlying declaration.  */
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);
  constr_entry elt = { t, NULL_TREE };
  constr_entry* found = decl_constraints->find (&elt);
  if (found)
    return found->ci;
  else
    return NULL_TREE;
}

/* Associate the given constraint information CI with the declaration
   T.  If T is a template, then the constraints are associated with
   its underlying declaration.  Don't build associations if CI is
   NULL_TREE.  */

void
set_constraints (tree t, tree ci)
{
  if (!ci)
    return;
  gcc_assert (t && flag_concepts);
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);
  /* A declaration's constraints may only be set once.  */
  gcc_assert (!get_constraints (t));
  constr_entry elt = {t, ci};
  constr_entry** slot = decl_constraints->find_slot (&elt, INSERT);
  constr_entry* entry = ggc_alloc<constr_entry> ();
  *entry = elt;
  *slot = entry;
}

/* Remove the associated constraints of the declaration T.
*/

void
remove_constraints (tree t)
{
  gcc_assert (DECL_P (t));
  /* As in get_constraints, the entry is keyed on the underlying
     declaration, not the template.  */
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);

  constr_entry elt = {t, NULL_TREE};
  constr_entry** slot = decl_constraints->find_slot (&elt, NO_INSERT);
  if (slot)
    decl_constraints->clear_slot (slot);
}

/* Memoized satisfaction results for declarations.  This maps the
   pair (constraint_info, arguments) to the result computed by
   constraints_satisfied_p.  */

struct GTY((for_user)) constraint_sat_entry
{
  tree ci;      /* The CONSTRAINT_INFO being tested.  */
  tree args;    /* The template arguments it was tested against.  */
  tree result;  /* The cached truth-value node.  */
};

/* Hashing function and equality for constraint entries.  The hash
   mixes the constraint-info pointer with the template arguments.  */

struct constraint_sat_hasher : ggc_ptr_hash<constraint_sat_entry>
{
  static hashval_t hash (constraint_sat_entry *e)
  {
    hashval_t val = iterative_hash_object(e->ci, 0);
    return iterative_hash_template_arg (e->args, val);
  }

  static bool equal (constraint_sat_entry *e1, constraint_sat_entry *e2)
  {
    return e1->ci == e2->ci && comp_template_args (e1->args, e2->args);
  }
};

/* Memoized satisfaction results for concept checks, keyed on the
   concept template and its arguments.  */

struct GTY((for_user)) concept_spec_entry
{
  tree tmpl;    /* The concept's TEMPLATE_DECL.  */
  tree args;    /* The arguments of the check.  */
  tree result;  /* The cached result.  */
};

/* Hashing function and equality for constraint entries.  Comparison
   is bracketed by comparing_specializations so template-argument
   comparison uses specialization semantics.  */

struct concept_spec_hasher : ggc_ptr_hash<concept_spec_entry>
{
  static hashval_t hash (concept_spec_entry *e)
  {
    return hash_tmpl_and_args (e->tmpl, e->args);
  }

  static bool equal (concept_spec_entry *e1, concept_spec_entry *e2)
  {
    ++comparing_specializations;
    bool eq = e1->tmpl == e2->tmpl && comp_template_args (e1->args, e2->args);
    --comparing_specializations;
    return eq;
  }
};

static GTY (()) hash_table<constraint_sat_hasher> *constraint_memos;
static GTY (()) hash_table<concept_spec_hasher> *concept_memos;

/* Search for a memoized satisfaction result. Returns one of the
   truth value nodes if previously memoized, or NULL_TREE otherwise.
*/

tree
lookup_constraint_satisfaction (tree ci, tree args)
{
  constraint_sat_entry elt = { ci, args, NULL_TREE };
  constraint_sat_entry* found = constraint_memos->find (&elt);
  if (found)
    return found->result;
  else
    return NULL_TREE;
}

/* Memoize the result of a satisfaction test.  Returns the saved
   result so callers can tail-call this.  */

tree
memoize_constraint_satisfaction (tree ci, tree args, tree result)
{
  constraint_sat_entry elt = {ci, args, result};
  constraint_sat_entry** slot = constraint_memos->find_slot (&elt, INSERT);
  constraint_sat_entry* entry = ggc_alloc<constraint_sat_entry> ();
  *entry = elt;
  *slot = entry;
  return result;
}

/* Search for a memoized satisfaction result for a concept.  Returns
   the cached result or NULL_TREE if not yet computed.  */

tree
lookup_concept_satisfaction (tree tmpl, tree args)
{
  concept_spec_entry elt = { tmpl, args, NULL_TREE };
  concept_spec_entry* found = concept_memos->find (&elt);
  if (found)
    return found->result;
  else
    return NULL_TREE;
}

/* Memoize the result of a concept check.  Returns the saved result.  */

tree
memoize_concept_satisfaction (tree tmpl, tree args, tree result)
{
  concept_spec_entry elt = {tmpl, args, result};
  concept_spec_entry** slot = concept_memos->find_slot (&elt, INSERT);
  concept_spec_entry* entry = ggc_alloc<concept_spec_entry> ();
  *entry = elt;
  *slot = entry;
  return result;
}

/* Cache of substituted-and-normalized concept definitions, keyed the
   same way as the satisfaction memos.  */

static GTY (()) hash_table<concept_spec_hasher> *concept_expansions;

/* Returns a prior concept specialization. This returns the
   substituted and normalized constraints defined by the concept.  */

tree
get_concept_expansion (tree tmpl, tree args)
{
  concept_spec_entry elt = { tmpl, args, NULL_TREE };
  concept_spec_entry* found = concept_expansions->find (&elt);
  if (found)
    return found->result;
  else
    return NULL_TREE;
}

/* Save a concept expansion for later.
*/ tree save_concept_expansion (tree tmpl, tree args, tree def) { concept_spec_entry elt = {tmpl, args, def}; concept_spec_entry** slot = concept_expansions->find_slot (&elt, INSERT); concept_spec_entry* entry = ggc_alloc<concept_spec_entry> (); *entry = elt; *slot = entry; return def; } static hashval_t hash_subsumption_args (tree t1, tree t2) { gcc_assert (TREE_CODE (t1) == CHECK_CONSTR); gcc_assert (TREE_CODE (t2) == CHECK_CONSTR); int val = 0; val = iterative_hash_object (CHECK_CONSTR_CONCEPT (t1), val); val = iterative_hash_template_arg (CHECK_CONSTR_ARGS (t1), val); val = iterative_hash_object (CHECK_CONSTR_CONCEPT (t2), val); val = iterative_hash_template_arg (CHECK_CONSTR_ARGS (t2), val); return val; } /* Compare the constraints of two subsumption entries. The LEFT1 and LEFT2 arguments comprise the first subsumption pair and the RIGHT1 and RIGHT2 arguments comprise the second. These are all CHECK_CONSTRs. */ static bool comp_subsumption_args (tree left1, tree left2, tree right1, tree right2) { if (CHECK_CONSTR_CONCEPT (left1) == CHECK_CONSTR_CONCEPT (right1)) if (CHECK_CONSTR_CONCEPT (left2) == CHECK_CONSTR_CONCEPT (right2)) if (comp_template_args (CHECK_CONSTR_ARGS (left1), CHECK_CONSTR_ARGS (right1))) return comp_template_args (CHECK_CONSTR_ARGS (left2), CHECK_CONSTR_ARGS (right2)); return false; } /* Key/value pair for learning and memoizing subsumption results. This associates a pair of check constraints (including arguments) with a boolean value indicating the result. */ struct GTY((for_user)) subsumption_entry { tree t1; tree t2; bool result; }; /* Hashing function and equality for constraint entries. 
*/

struct subsumption_hasher : ggc_ptr_hash<subsumption_entry>
{
  static hashval_t hash (subsumption_entry *e)
  {
    return hash_subsumption_args (e->t1, e->t2);
  }

  static bool equal (subsumption_entry *e1, subsumption_entry *e2)
  {
    /* Bracket with comparing_specializations so template-argument
       comparison uses specialization semantics.  */
    ++comparing_specializations;
    bool eq = comp_subsumption_args(e1->t1, e1->t2, e2->t1, e2->t2);
    --comparing_specializations;
    return eq;
  }
};

static GTY (()) hash_table<subsumption_hasher> *subsumption_table;

/* Search for a previously cached subsumption result.  Returns a
   pointer into the cache entry, or null if not yet computed.  */

bool*
lookup_subsumption_result (tree t1, tree t2)
{
  subsumption_entry elt = { t1, t2, false };
  subsumption_entry* found = subsumption_table->find (&elt);
  if (found)
    return &found->result;
  else
    return 0;
}

/* Save a subsumption result.  Returns RESULT for convenience.  */

bool
save_subsumption_result (tree t1, tree t2, bool result)
{
  subsumption_entry elt = {t1, t2, result};
  subsumption_entry** slot = subsumption_table->find_slot (&elt, INSERT);
  subsumption_entry* entry = ggc_alloc<subsumption_entry> ();
  *entry = elt;
  *slot = entry;
  return result;
}

/* Set up the hash table for constraint association.  No-op unless
   concepts are enabled.  */

void
init_constraint_processing (void)
{
  if (!flag_concepts)
    return;

  decl_constraints = hash_table<constr_hasher>::create_ggc(37);
  constraint_memos = hash_table<constraint_sat_hasher>::create_ggc(37);
  concept_memos = hash_table<concept_spec_hasher>::create_ggc(37);
  concept_expansions = hash_table<concept_spec_hasher>::create_ggc(37);
  subsumption_table = hash_table<subsumption_hasher>::create_ggc(37);
}

/* __integer_pack(N) in a pack expansion expands to a sequence of
   numbers from 0..N-1.  Declare the frontend builtin that implements
   it: a constexpr library function int __integer_pack(int).  */

void
declare_integer_pack (void)
{
  tree ipfn = push_library_fn (get_identifier ("__integer_pack"),
			       build_function_type_list (integer_type_node,
							 integer_type_node,
							 NULL_TREE),
			       NULL_TREE, ECF_CONST);
  DECL_DECLARED_CONSTEXPR_P (ipfn) = true;
  DECL_BUILT_IN_CLASS (ipfn) = BUILT_IN_FRONTEND;
}

/* Set up the hash tables for template instantiations.
*/ void init_template_processing (void) { decl_specializations = hash_table<spec_hasher>::create_ggc (37); type_specializations = hash_table<spec_hasher>::create_ggc (37); if (cxx_dialect >= cxx11) declare_integer_pack (); } /* Print stats about the template hash tables for -fstats. */ void print_template_statistics (void) { fprintf (stderr, "decl_specializations: size %ld, %ld elements, " "%f collisions\n", (long) decl_specializations->size (), (long) decl_specializations->elements (), decl_specializations->collisions ()); fprintf (stderr, "type_specializations: size %ld, %ld elements, " "%f collisions\n", (long) type_specializations->size (), (long) type_specializations->elements (), type_specializations->collisions ()); } #if CHECKING_P namespace selftest { /* Verify that build_non_dependent_expr () works, for various expressions, and that location wrappers don't affect the results. */ static void test_build_non_dependent_expr () { location_t loc = BUILTINS_LOCATION; /* Verify constants, without and with location wrappers. */ tree int_cst = build_int_cst (integer_type_node, 42); ASSERT_EQ (int_cst, build_non_dependent_expr (int_cst)); tree wrapped_int_cst = maybe_wrap_with_location (int_cst, loc); ASSERT_TRUE (location_wrapper_p (wrapped_int_cst)); ASSERT_EQ (wrapped_int_cst, build_non_dependent_expr (wrapped_int_cst)); tree string_lit = build_string (4, "foo"); TREE_TYPE (string_lit) = char_array_type_node; string_lit = fix_string_type (string_lit); ASSERT_EQ (string_lit, build_non_dependent_expr (string_lit)); tree wrapped_string_lit = maybe_wrap_with_location (string_lit, loc); ASSERT_TRUE (location_wrapper_p (wrapped_string_lit)); ASSERT_EQ (wrapped_string_lit, build_non_dependent_expr (wrapped_string_lit)); } /* Verify that type_dependent_expression_p () works correctly, even in the presence of location wrapper nodes. 
*/

static void
test_type_dependent_expression_p ()
{
  location_t loc = BUILTINS_LOCATION;

  tree name = get_identifier ("foo");

  /* If no templates are involved, nothing is type-dependent.  */
  gcc_assert (!processing_template_decl);
  ASSERT_FALSE (type_dependent_expression_p (name));

  ++processing_template_decl;

  /* Within a template, an unresolved name is always type-dependent.  */
  ASSERT_TRUE (type_dependent_expression_p (name));

  /* Ensure it copes with NULL_TREE and errors.  */
  ASSERT_FALSE (type_dependent_expression_p (NULL_TREE));
  ASSERT_FALSE (type_dependent_expression_p (error_mark_node));

  /* A USING_DECL in a template should be type-dependent, even if wrapped
     with a location wrapper (PR c++/83799).  */
  tree using_decl = build_lang_decl (USING_DECL, name, NULL_TREE);
  TREE_TYPE (using_decl) = integer_type_node;
  ASSERT_TRUE (type_dependent_expression_p (using_decl));
  tree wrapped_using_decl = maybe_wrap_with_location (using_decl, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_using_decl));
  ASSERT_TRUE (type_dependent_expression_p (wrapped_using_decl));

  /* Restore the non-template state entered above.  */
  --processing_template_decl;
}

/* Run all of the selftests within this file.  */

void
cp_pt_c_tests ()
{
  test_build_non_dependent_expr ();
  test_type_dependent_expression_p ();
}

} // namespace selftest

#endif /* #if CHECKING_P */

#include "gt-cp-pt.h"
d2d_memcpy.c
// RUN: %libomptarget-compile-generic && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-generic | %fcheck-generic -allow-empty #include <assert.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> const int magic_num = 7; int main(int argc, char *argv[]) { const int N = 128; const int num_devices = omp_get_num_devices(); // No target device, just return if (num_devices == 0) { printf("PASS\n"); return 0; } const int src_device = 0; int dst_device = num_devices - 1; int length = N * sizeof(int); int *src_ptr = omp_target_alloc(length, src_device); int *dst_ptr = omp_target_alloc(length, dst_device); assert(src_ptr && "src_ptr is NULL"); assert(dst_ptr && "dst_ptr is NULL"); #pragma omp target teams distribute parallel for device(src_device) \ is_device_ptr(src_ptr) for (int i = 0; i < N; ++i) { src_ptr[i] = magic_num; } int rc = omp_target_memcpy(dst_ptr, src_ptr, length, 0, 0, dst_device, src_device); assert(rc == 0 && "error in omp_target_memcpy"); int *buffer = malloc(length); assert(buffer && "failed to allocate host buffer"); #pragma omp target teams distribute parallel for device(dst_device) \ map(from: buffer[0:N]) is_device_ptr(dst_ptr) for (int i = 0; i < N; ++i) { buffer[i] = dst_ptr[i] + magic_num; } for (int i = 0; i < N; ++i) assert(buffer[i] == 2 * magic_num); printf("PASS\n"); // Free host and device memory free(buffer); omp_target_free(src_ptr, src_device); omp_target_free(dst_ptr, dst_device); return 0; } // CHECK: PASS
opencl_odf_aes_fmt_plug.c
/* Modified by Dhiru Kholia <dhiru at openwall.com> for ODF AES format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_odf_aes; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_odf_aes); #else #include <string.h> #include <openssl/aes.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "misc.h" #include "options.h" #include "common.h" #include "formats.h" #include "common-opencl.h" #include "sha2.h" #define FORMAT_LABEL "ODF-AES-opencl" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA256 OpenCL AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(odf_cpu_salt) #define uint8_t unsigned char #define uint16_t unsigned short #define uint32_t unsigned int typedef struct { uint32_t length; uint8_t v[32]; // hash of password } odf_password; typedef struct { uint32_t v[32/4]; } odf_hash; typedef struct { uint8_t length; uint8_t salt[64]; uint32_t iterations; uint32_t outlen; } odf_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; typedef struct { int cipher_type; int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int content_length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } odf_cpu_salt; static odf_cpu_salt *cur_salt; static struct fmt_tests tests[] = { 
{"$odf$*1*1*1024*32*61802eba18eab842de1d053809ba40927fd40b26c69ddeca6a8a652ed9c16a28*16*c5c0815b931f313627100d592a9c972f*16*e9a48b7daff738deaabe442007fb2ec4*0*be3b65ea09642c2b4fdc23e553e1f5304bc5df222b624c6373d53e674f5df01fdb8873cdab7a5a685fa45ad5441a9d8869401b7fa076c488ad53fd9971e97244ecc9416484450d4fb2ee4ec08af4044d7def937e6545dea2ce36bd5c57b1f46b11b9cf90c8fb3accff149ce2d54820b181b9124db9aac131f6436d77cf716423f04d42438eed6f9ca14bd24b9b17d3478176addd5fa0254bf986fccd879e326485790e28b94ad5306868734b5ac1b1ddb3f876382dee6e9428e8230e84bf11b7e85ccbae8b4b424cd73160c380f874b37fbe3c7e88c13ef4bde74b56507d17095c2c32bb8bcded0637e4403107bb33252f72f5886a91b7720fe32a8659a09c217717e4c74a7c2e09fc40b46aa288309a36e86b9f1856e1bce176bc9690555431e05c7b67ff95df64f8f40053079bfc9dda021ab2714fecf74398b867ebef675958f29eaa15eb631845e358a0c5caff0b824a2a69a6eabee069d3d6236d77709fd60438c9e3ad9e42b26810375e1e587eff105ac295327ef8bf66f6462388b7727ec32d6abde2f8d6126b185124bb437753663f6ab1f321ddfdb36d9f1f528729492e0b1bb8d3b9eda3c86c1997c92b902f5160f77587c37e45b5c133b5d9709fea910a2e9b54c0960b0ebc870cdbb858aabe07ed27cba86d29a7e64c6e3863131859314a14e64c1168d4a2d5ca0697853fb1fe969ba968e31359881d51edce287eff415de8e60cec2068bb82157fbcf0cf9a95e92cb23f32e6156daced4bee6ba8c8b41174d01fcd7662911bcc10d5b4478f8209ce3b91075d10529780be4f17e841a1f1833d432c3dc854908643e58b03c8860dfbc710a29f79f75ea262cfcef9cd67fb67d73f55b300d42f4577445af2b9f224620204cfb88de2cbf57931ac0e0f8d98259a41d744cad6a58abc7761c266f4e93aca19356b07073c09ae9d1976f4f2e1a76c350cc7764c27ae257eb69ba4213dd0a7794fa83d220439a398efd988b6dbf0de4c08bc3e4830c9e482b9e0fd1679f14e6f132cf06bae1d763dde7ce6f525ff9a0ebad28aeca16496194f2a6263a20e7afeb43d83c8c936130d6508f2bf68b5ca50375948424193a7fb1106fdf63ff72896e1b2633907f01a693218e3303436542bcf2af24cc4a41621c36768ce9a84d32cc9f3c2b108bfc78c25b1c2ea94e6e0d65406f78bdb8bc33c94a9550e5cc3e995cfbd31da03afb929418acdc89b099415f9bdb7dab7a75d44a696e14b031d601ad8d907e14a28044706c0c2955df2cb34ffea82af367e487b6cc928dc87a33fc7555
173e7faa5cfd1af6d3d6f496f23a9579db22dd4a2c16e950fdc90696d95a81183765a4fbddb42c488d40ac1de28483cf1cdddf821d3f859c57b13cb7f21a916bd0d89438a17634c68637f23e2544589e8ae5ee5bced91680c087cb3105cd74a09e88d3aae17d75e", "test"}, /* CMIYC 2013 "pro" hard hash */ {"$odf$*1*1*1024*32*7db40092b3857fa319bc0d717b60cefc40b1d51ef92ebc893c518ffebffdf200*16*5f7c8ab6e5d1c41dbd23c384fee957ed*16*9ff092f2dd29dab6ce5fb43ad7bbdd5a*0*bac8343436715b40aaf4690a7dc57b0f82b8f25f8ad0f9833e32468410d4dd02e387a067872b5847adc9a276c86a03113e11b903854202eec361c5b7ba74bcb254a4f76d97ca45dbe30fe49f78ce9cf7df0246ae4524b8f13ad28357838559c116d9ed59267f4df91da3ea9758c132e2ebc40fd4ee8e9978921a0847d7ca5c30ef911e0b88f9fc84039633eacf5e023c82dd1a573abd7663b8f36a039d42ed91b4a0665902f174be8cefefd367ba9b5da95768550e567242f1b2e2c3866eb8aa3c12d0b34277929616319ea29dd9a3b9addb963d45c7d4c2b54a99b0c1cf24cac3e981ed4e178e621938b83be30f54d37d6425a0b7ac9dff5504830fe1d1f136913c32d8f732eb55e6179ad2699fd851af3a44f8ca914117344e6fadf501bf6f6e0ae7970a2b58eb3af0d89c78411c6adde8aa1f0e8b69c261fd04835cdc3ddf0a6d67ddff33995b5cc7439db83f90c8a2e07e2513771fffcf8b55ce1a382b14ffbf22be9bdd6f83a9b7602995c9793dfffb32c9eb16930c0bb55e5a8364fa06a59fca5af27df4a02565db2b4718ed44405f67a052738692c189039a7fd63713207616eeeebace3c0a3963dd882c485523f49fa0bc2663fc6ef090a220dd5c6554bc0702da8c3122383ea8a009837d549d58ad688c9cc4b8461fe70f4600539cd1d82edd4e110b1c1472dae40adc3126e2a09dd2753dcd83799841745160e235652f601d1257268321f22d19bd9dc811afaf143765c7cb53717ea329e9e4064a3cf54b33d006e93b83102e2ad3327f6d995cb598bd96466b1287e6da9967f4f034c63fd06c6e5c7ec25008c122385f271d18918cff3823f9fbdb37791e7371ce1d6a4ab08c12eca5fceb7c9aa7ce25a8bd640a68c622ddd858973426cb28e65c4c3421b98ebf4916b8c2bfe71b2afec4ab2f99291a4c4d3312521850d46436aecd9e2e93a8619dbc3c1caf4507bb488ce921cd8d13a1640e6c49403e0416924b3b1a01c9939c7bcdec50f057d6f4dccf0afc8c2ad37c4f8429c77cf19ad49db5e5219e965a3ed5d56d799689bd93642602d7959df0493ea62cccff83e66d85bf45d6b5b03e8cfca84daf37ecfccb60f85f3c5102900a02a5df015
b1bf1ef55dfb2ab20321bcf3325d1adce22d4456837dcc589ef36d4f06ccdcc96ef10ff806d76f0044e92e192b946ae0f09860a38c2a6052fe84c3e9bb9380e2b344812376c6bbd5c9858745dbd072798a3d7eff31ae5d509c11b5269ec6f2108cb6e72a5ab495ea7aed5bf3dabedbb517dc4ceff818a8e890a6ea9a91bab37e8a463a9d04993c5ba7e40e743e033842540806d4a65258d0f4d5988e1e0011f0e85fcae3b2819c1f17f5c7980ecd87aee425cdab4f34bfb7a31ee7936c60f2f4f52aea67aef4736a419dc9c559279b569f61995eb2d6b7c204c3e9f56ca5c8a889812a30c33", "juNK^r00M!"}, {NULL} }; static cl_int cl_error; static odf_password *inbuffer; static odf_hash *outbuffer; static odf_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define OCL_CONFIG "odf-aes" #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 
8 : 1; else return 64; } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(odf_password) * gws; outsize = sizeof(odf_hash) * gws; settingsize = sizeof(odf_salt); cracked_size = sizeof(*crypt_out) * gws; inbuffer = mem_calloc(insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(sizeof(*saved_key) * gws); crypt_out = mem_calloc(cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); } static void init(struct fmt_main *self) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); 
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(odf_password), 0); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 1000); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$odf$*", 6)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtok(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 1) { goto err; } if ((p = strtok(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iterations */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res > 16) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res > 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* something */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* content */ goto err; res = strlen(p); if (res > 2048 || res & 1) goto err; if (!ishex(p)) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char 
*ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static odf_cpu_salt cs; ctcopy += 6; /* skip over "$odf$*" */ p = strtok(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtok(NULL, "*"); cs.checksum_type = atoi(p); p = strtok(NULL, "*"); cs.iterations = atoi(p); p = strtok(NULL, "*"); cs.key_size = atoi(p); p = strtok(NULL, "*"); /* skip checksum field */ p = strtok(NULL, "*"); cs.iv_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.salt_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); p = strtok(NULL, "*"); memset(cs.content, 0, sizeof(cs.content)); for (i = 0; p[i * 2] && i < 1024; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; cs.content_length = i; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$odf$*" */ p = strtok(ctcopy, "*"); p = strtok(NULL, "*"); p = strtok(NULL, "*"); p = strtok(NULL, "*"); p = strtok(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static void set_salt(void *salt) { cur_salt = (odf_cpu_salt*)salt; memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length); currentsalt.length = cur_salt->salt_length; currentsalt.iterations = cur_salt->iterations; currentsalt.outlen = cur_salt->key_size; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } static int get_hash_0(int index) { 
return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } #undef set_key static void set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index; global_work_size = (count + local_work_size - 1) / local_work_size * local_work_size; #ifdef _OPENMP #pragma omp parallel for #endif for(index = 0; index < count; index++) { unsigned char hash[32]; SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index])); SHA256_Final((unsigned char *)hash, &ctx); memcpy(inbuffer[index].v, hash, 32); inbuffer[index].length = 32; } /// Copy data to gpu HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); #ifdef _OPENMP #pragma omp parallel for #endif for(index = 0; index < count; index++) { AES_KEY akey; unsigned char iv[32]; SHA256_CTX ctx; unsigned char 
pt[1024]; memcpy(iv, cur_salt->iv, 32); memset(&akey, 0, sizeof(AES_KEY)); AES_set_decrypt_key((unsigned char*)outbuffer[index].v, 256, &akey); AES_cbc_encrypt(cur_salt->content, pt, cur_salt->content_length, &akey, iv, AES_DECRYPT); SHA256_Init(&ctx); SHA256_Update(&ctx, pt, cur_salt->content_length); SHA256_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } #if FMT_MAIN_VERSION > 11 /* * The format tests all have iteration count 1024. * Just in case the iteration count is tunable, let's report it. */ static unsigned int iteration_count(void *salt) { odf_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } #endif struct fmt_main fmt_opencl_odf_aes = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, 4, SALT_SIZE, 1, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
/* ==================== image.c ==================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include 
"MagickCore/token-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include "MagickCore/xwindow-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireCriticalMemory(sizeof(*image)); (void) memset(image,0,sizeof(*image)); /* Initialize Image structure. 
*/ (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color, exception); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image->transparent_color,exception); GetTimerInfo(&image->timer); image->cache=AcquirePixelCache(0); image->channel_mask=DefaultChannels; image->channel_map=AcquirePixelChannelMap(); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=GetMagickTime(); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AcquireSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; (void) memset(&geometry,0,sizeof(geometry)); flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->matte_color=image_info->matte_color; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; 
image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); /* Set all global options that map to per-image settings. */ (void) SyncImageSettings(image_info,image,exception); /* Global options that are only set for new images. */ option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  /* AcquireCriticalMemory() aborts on allocation failure, so the result
     is never NULL here. */
  image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.  The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  /* Inherit the current filename; overridden just below when the caller
     supplied an ImageInfo with its own filename. */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MagickPathExtent);
  /* Share this image's I/O blob with the new frame: drop the blob the
     fresh image was given, then take a reference.  Order matters. */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting effects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area: one pass over the list to find
    the canvas size, maximum depth, whether any frame carries alpha, and
    whether all frames share the first frame's colorspace.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Vertical stack: canvas is max width, summed height. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* Horizontal append: canvas is summed width, max height. */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images: clone the first frame at canvas size, then copy each
    frame's pixels at its running offset, honoring its gravity.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      /* A row that failed earlier poisons the whole copy; later rows
         just skip their work (cannot 'break' out of an OpenMP loop). */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      /* Copy via PixelInfo so differing channel layouts are translated. */
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): this reports from a freshly acquired ExceptionInfo;
     the 'image' argument is only used for the asserts/logging above —
     confirm this matches the documented intent. */
  exception=AcquireExceptionInfo();
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based on any clipping path
%  information if it exists.
% % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if 
(SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); image->mask_trait=UpdatePixelTrait; clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { Image *clone_image; double scale; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); 
clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. */ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->cache=ClonePixelCache(image->cache); if 
(SetImageExtent(clone_image,columns,rows,exception) == MagickFalse) clone_image=DestroyImage(clone_image); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char *) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if 
(image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->matte_color=image_info->matte_color; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->custom_stream=image_info->custom_stream; (void) CopyMagickString(clone_info->magick,image_info->magick, MagickPathExtent); (void) CopyMagickString(clone_info->unique,image_info->unique, MagickPathExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MagickPathExtent); clone_info->channel=image_info->channel; (void) CloneImageOptions(clone_info,image_info); clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % 
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
%        const RectangleInfo *geometry,const OffsetInfo *offset,
%        ExceptionInfo *exception);
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    Reject any placement whose rectangle does not fit in the destination.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.  Rows are processed independently, one thread per
    row band when OpenMP is available.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels that exist in both images and are updatable in
        the destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image: the refcount decrement happens under the semaphore so
    only one caller sees it hit zero.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image: release every owned structure, invalidate the signature
    so a dangling pointer trips the asserts, then free the struct itself.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Free each owned string/resource; DestroyString and friends return NULL
    so the members are cleared as they are released.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /*
    Invalidate the signature before freeing to catch use-after-free.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images. If the reference
%  count is higher then 1 a new blob is assigned to the specified image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Blob-level logic (refcount check, reallocation) lives in DisassociateBlob. */
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    MAGICK_SYNCHRONIZE environment variable overrides the default.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default colors; errors here are intentionally discarded.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor: no validation, no side effects. */
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,const PixelMask type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Return NULL when the requested mask channel is not active on the image;
    the default case covers the composite mask.
  */
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  /*
    Extract the mask channel into a new grayscale image of the same extent.
  */
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Read under the semaphore so the value is coherent with concurrent
     Reference/Destroy calls; it may still be stale by the time it returns. */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Validate the handle, then delegate straight to the pixel cache, which
    owns the virtual-pixel policy.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format:  A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value:  Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  ssize_t
    field_width,
    offset;

  /*
    Scan the format for '%' specifiers; %d/%o/%x substitute the numeric
    value, %[filename:...] substitutes an image property/artifact/option.
    'offset' tracks the running length difference between format and the
    substituted filename.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* literal "%%": skip both characters */
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: temporarily NUL-terminate after the
          conversion character so p is a self-contained format string.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        /* NOTE(review): assumes the rendered number occupies ~4 characters
           unless a field width was given — TODO confirm this offset model */
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.  Copy the bracketed pattern, honoring nested
          brackets, then resolve it as property, artifact, or option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only "filename:" patterns are substituted here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        /* +3 accounts for the "%[" and "]" delimiters removed. */
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    /* Collapse any remaining "%%" escapes into a single '%'. */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Without HDRI support, pixel values cannot exceed the quantum range. */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    NOTE: 'status' here means "all samples fit in the quantum range", so it
    is flipped to MagickFalse as soon as an out-of-range or fractional
    sample is found; the final return inverts it.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /* HDR if negative, above QuantumRange, or not an exact integer. */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Inverted: status==MagickFalse means an HDR sample was found. */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  /*
    Walk the image list; a single frame with a bad signature invalidates
    the whole sequence.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  p=image;
  while (p != (Image *) NULL)
  {
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
    p=GetNextImageInList(p);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue any pixel in the image has been altered
%  since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MagickPathExtent],
    filename[MagickPathExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Snapshot the head frame's identity; a frame is "tainted" if its taint
    flag is set or if its magick/filename diverge from the head frame.
  */
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  p=image;
  while (p != (Image *) NULL)
  {
    if ((p->taint != MagickFalse) ||
        (LocaleCompare(p->magick,magick) != 0) ||
        (LocaleCompare(p->filename,filename) != 0))
      return(MagickTrue);
    p=GetNextImageInList(p);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /*
    Already the sole owner: nothing to do.
  */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        Fix: the original code dereferenced and replaced *image without
        checking the clone, leaving callers with a NULL image (and a
        dropped reference) on allocation failure.  Leave *image intact
        and report the failure instead.
      */
      return(MagickFalse);
    }
  /*
    Drop our reference on the shared image and hand the caller the clone.
  */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const PixelInfo *background,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  image=AcquireImage(image_info,exception);
  /*
    Adopt the canvas geometry and the background pixel's traits before
    touching the pixel cache.
  */
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Queue (rather than read) the row: every pixel is overwritten below.
    */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the partially built canvas is destroyed and NULL is
    returned; details are in 'exception'.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e f e r e n c e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferenceImage() increments the reference count associated with an image
%  returning a pointer to the image.
%
%  The format of the ReferenceImage method is:
%
%      Image *ReferenceImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
% */
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    The count is guarded by the image semaphore; pair every ReferenceImage()
    with a DestroyImage() to release the reference.
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
% */
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A width with no height implies a square page. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* Relative ('!') offsets adjust the existing page origin. */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /* Absolute offsets replace the origin and may grow the canvas. */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() reset the image pixels, that is, all the pixel components
%  are zeroed.
%
%  The format of the SetImage method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  size_t
    length;

  ssize_t
    y;

  void
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.  Fast path: the whole cache is resident
        in memory, so a single memset suffices.
      */
      (void) memset(pixels,0,length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.  Slow path: zero the cache row by row through
    authentic cache views (e.g. disk-backed caches).
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlpha() sets the alpha levels of the image.
%
%  The format of the SetImageAlpha method is:
%
%      MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: the level of transparency: 0 is fully transparent and
%      QuantumRange is fully opaque.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Mark the image as carrying an alpha channel before writing alpha values.
  */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Get (not Queue) the row: existing channel values must be preserved.
    */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Only touch pixels whose write mask is more than half set. */
      if (GetPixelWriteMask(image,q) > (QuantumRange/2))
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImage method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    If the background carries alpha but the image does not yet, enable the
    image's alpha channel first.
  */
  if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
    NOTE(review): this row loop is not OpenMP-parallelized, unlike the
    otherwise-identical loops in NewMagickImage/SetImageColor — presumably
    intentional, but worth confirming.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelMask() sets the image channel mask from the specified channel
%  mask.
%
%  The format of the SetImageChannelMask method is:
%
%      ChannelType SetImageChannelMask(Image *image,
%        const ChannelType channel_mask)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel_mask: the channel mask.
% */
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* Thin wrapper; returns the previous channel mask. */
  return(SetPixelChannelMask(image,channel_mask));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() set the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /*
    Adopt the fill color's traits, then flood every pixel with it.
  */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e S t o r a g e C l a s s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Changing the class invalidates the cache; resync it. */
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  /*
    Clamp the depth to a supported range, warning (not failing) on bad
    values.
  */
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, 'ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: 'image.jpg'.  The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
#if defined(MAGICKCORE_ZLIB_DELEGATE) || defined(MAGICKCORE_BZLIB_DELEGATE)
    path[MagickPathExtent],
#endif
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers/ranges (e.g.
            "2,4-7") into scene (lowest) and number_scenes (span).
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* Strip a compression suffix so the real extension is examined. */
  if (*component != '\0')
    if ((LocaleCompare(component,"gz") == 0) ||
        (LocaleCompare(component,"Z") == 0) ||
        (LocaleCompare(component,"svgz") == 0) ||
        (LocaleCompare(component,"wmz") == 0))
      {
        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*component != '\0')
    if (LocaleCompare(component,"bz2") == 0)
      {
        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*component != '\0') && (IsGlob(component) == MagickFalse))
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,component);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to seekable temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireQuantumMemory(1,magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) memset(magick,0,magick_size);
      /* Peek at the header bytes, then rewind so readers see them again. */
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic cache.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the
            extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->magick_module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o B l o b                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
% */
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Stores the pointer only; the caller retains ownership of the blob. */
  image_info->blob=(void *) blob;
  image_info->length=length;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o C u s t o m S t r e a m                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoCustomStream() sets the image info custom stream handlers.
%
%  The format of the SetImageInfoCustomStream method is:
%
%      void SetImageInfoCustomStream(ImageInfo *image_info,
%        CustomStreamInfo *custom_stream)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o custom_stream: your custom stream methods.
%
% */
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Stores the pointer only; the caller retains ownership. */
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoFile() sets the image info file member.
%
%  The format of the SetImageInfoFile method is:
%
%      void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o file: the file.
% */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { 
SetPixelWriteMask(image,ClampToQuantum(intensity),q); break; } default: { SetPixelCompositeMask(image,ClampToQuantum(intensity),q); break; } } p+=GetPixelChannels(mask); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e R e g i o n M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageRegionMask() associates a mask with the image as defined by the % specified region. % % The format of the SetImageRegionMask method is: % % MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type, % const RectangleInfo *region,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o geometry: the mask region. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageRegionMask(Image *image, const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Set image mask as defined by the region. 
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /* A NULL region clears only the requested mask channel. */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels &
            ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image->mask_trait=UpdatePixelTrait;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* Pixels inside the rectangle get 0; outside get QuantumRange. */
      pixel=QuantumRange;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=(Quantum) 0;
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SmushXGap() returns how far the right image may be shifted left toward its
  left neighbor: the smallest combined transparent margin (right edge of the
  left image plus left edge of the right image) across all rows, minus the
  requested minimum offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* scan the left image from its right edge for transparent pixels */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* scan the right image from its left edge for transparent pixels */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap() is the vertical analogue of SmushXGap(): it returns how far the
  bottom image may be shifted up toward its top neighbor.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* scan the top image from its bottom edge for transparent pixels */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* scan the bottom image from its top edge for transparent pixels */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* stacking: width is the maximum, heights (plus offsets) accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* side-by-side: widths (plus offsets) accumulate, height is the maximum */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* clamp the canvas to the extent actually composited */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Validate the image signature for consistency with the other MagickCore
    entry points in this file (SetImageRegionMask, SyncImage, ...); the
    original omitted this assert.
  */
  assert(image->signature == MagickCoreSignature);
  (void) exception;
  /* drop every profile (ICC, EXIF, ...) and identifying properties */
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* tell the PNG coder not to emit metadata chunks on the next write */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  PushColormapIndex() validates a colormap index; out-of-range indexes are
  clamped to 0 and flagged via *range_exception.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* preserve the taint flag: syncing pixels is not a user modification */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* replace each pixel with the colormap entry its index refers to */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e
t t i n g s                                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs any image_info global options into per-image
%  attributes.
%
%  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
%  that operations and coders can find such settings.  In IMv7 if a desired
%  per-image artifact is not set, then it will directly look for a global
%  option as a fallback, as such this copy is no longer needed, only the
%  link set up.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  /* apply the global settings to every image in the list */
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  /* "page" is a one-shot option; consume it so it is not applied twice */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      /* a single value sets both chromaticity coordinates */
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* an explicit image_info quality overrides the "quality" option */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /* convert the stored resolution when the units change */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* round to two decimal places */
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          image->resolution.x=geometry_info.rho;
          image->resolution.y=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            image->resolution.y=image->resolution.x;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a global
    option setting/define.  This saves a lot of duplication of global options
    into per-image artifacts, while ensuring only specifically set per-image
    artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
DEMFunctor.h
/** * @file DEMFunctor.h * * @date 01.07.21 * @author R. Penz */ #pragma once #include <array> #include <cmath> #include "autopas/pairwiseFunctors/Functor.h" #include "autopas/particles/OwnershipState.h" #include "autopas/utils/AlignedAllocator.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/ExceptionHandler.h" #include "autopas/utils/SoA.h" #include "autopas/utils/StaticBoolSelector.h" #include "autopas/utils/WrapOpenMP.h" #include "autopas/utils/inBox.h" namespace autopas { /** * A functor to handle interactions between two DEM particles. * @tparam Particle The type of particle. * @tparam useNewton3 Switch for the functor to support newton3 on, off or both. See FunctorN3Modes for possible values. * @tparam relevantForTuning Whether or not the auto-tuner should consider this functor. */ template <class Particle, FunctorN3Modes useNewton3 = FunctorN3Modes::Both, bool relevantForTuning = true> class DEMFunctor : public Functor<Particle, DEMFunctor<Particle, useNewton3, relevantForTuning>> { /** * Structure of the SoAs defined by the particle. */ using SoAArraysType = typename Particle::SoAArraysType; /** * Precision of SoA entries. */ using SoAFloatPrecision = typename Particle::ParticleSoAFloatPrecision; public: /** * Deleted default constructor */ DEMFunctor() = delete; /** * Internal, actual constructor. 
* @param cutoff */ explicit DEMFunctor(double cutoff) : Functor<Particle, DEMFunctor<Particle, useNewton3, relevantForTuning>>(cutoff) {} bool isRelevantForTuning() final { return relevantForTuning; } bool allowsNewton3() final { return useNewton3 == FunctorN3Modes::Newton3Only or useNewton3 == FunctorN3Modes::Both; } bool allowsNonNewton3() final { return useNewton3 == FunctorN3Modes::Newton3Off or useNewton3 == FunctorN3Modes::Both; } /** * Hertz elastic Solution * F = 4/3*E*sqrt(R)*sqrt(delta^3) * R = R1*R2/(R1+R2) * 1/E = (1-v1^2)/E1 + (1-v2^2)/E2 * * * needed Parameters: * E1, E2 - respective Young's modulus, need to specify * v1, v2 - respective Poisson ratio, need to specify * R - Radii * delta - approach distance */ void AoSFunctor(Particle &i, Particle &j, bool newton3) final { if (i.isDummy() or j.isDummy()) { return; } auto dr = utils::ArrayMath::sub(i.getR(), j.getR()); //distance between ParticleCenters double penDepth = i.getRad()+j.getRad()-utils::ArrayMath::L2Norm(dr); if(penDepth <= 0) return; //return if Particles dont intersect double e1 = (1-pow(i.getPoisson(), 2))/i.getYoung(); double e2 = (1-pow(j.getPoisson(), 2))/j.getYoung(); double e = 1/(e1+e2); double r = i.getRad()*j.getRad()/(i.getRad()+j.getRad()); //calculate Force and ForceVector double f = 4/3*e*sqrt(r)*pow(penDepth, 3./2.); auto vecf = utils::ArrayMath::mulScalar (utils::ArrayMath::normalize(dr),f); i.addF(vecf); if(newton3) { j.subF(vecf); } } void SoAFunctorCalc(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool single, bool newton3) { if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return; size_t shift = 0; const auto *const __restrict id1ptr = soa1.template begin<Particle::AttributeNames::id>(); const auto *const __restrict id2ptr = soa2.template begin<Particle::AttributeNames::id>(); const auto *const __restrict x1ptr = soa1.template begin<Particle::AttributeNames::posX>(); const auto *const __restrict y1ptr = soa1.template 
begin<Particle::AttributeNames::posY>(); const auto *const __restrict z1ptr = soa1.template begin<Particle::AttributeNames::posZ>(); const auto *const __restrict x2ptr = soa2.template begin<Particle::AttributeNames::posX>(); const auto *const __restrict y2ptr = soa2.template begin<Particle::AttributeNames::posY>(); const auto *const __restrict z2ptr = soa2.template begin<Particle::AttributeNames::posZ>(); SoAFloatPrecision *const __restrict fx1ptr = soa1.template begin<Particle::AttributeNames::forceX>(); SoAFloatPrecision *const __restrict fy1ptr = soa1.template begin<Particle::AttributeNames::forceY>(); SoAFloatPrecision *const __restrict fz1ptr = soa1.template begin<Particle::AttributeNames::forceZ>(); SoAFloatPrecision *const __restrict fx2ptr = soa2.template begin<Particle::AttributeNames::forceX>(); SoAFloatPrecision *const __restrict fy2ptr = soa2.template begin<Particle::AttributeNames::forceY>(); SoAFloatPrecision *const __restrict fz2ptr = soa2.template begin<Particle::AttributeNames::forceZ>(); const auto *const __restrict rad1ptr = soa1.template begin<Particle::AttributeNames::rad>(); const auto *const __restrict poisson1ptr = soa1.template begin<Particle::AttributeNames::poisson>(); const auto *const __restrict young1ptr = soa1.template begin<Particle::AttributeNames::young>(); const auto *const __restrict rad2ptr = soa2.template begin<Particle::AttributeNames::rad>(); const auto *const __restrict poisson2ptr = soa2.template begin<Particle::AttributeNames::poisson>(); const auto *const __restrict young2ptr = soa2.template begin<Particle::AttributeNames::young>(); const auto *const __restrict ownership1ptr = soa1.template begin<Particle::AttributeNames::ownershipState>(); const auto *const __restrict ownership2ptr = soa2.template begin<Particle::AttributeNames::ownershipState>(); for(unsigned int i = 0; i < soa1.getNumParticles(); ++i) { if(single){shift++;} //increase shift by 1 if single View const auto ownedStateI = ownership1ptr[i]; if(ownedStateI 
== OwnershipState::dummy) {return;} //accumulating Force directions SoAFloatPrecision fxacc = 0; SoAFloatPrecision fyacc = 0; SoAFloatPrecision fzacc = 0; // icpc vectorizes this. // g++ only with -ffast-math or -funsafe-math-optimizations // shift for SoAFunctorSingle #pragma omp simd reduction(+ : fxacc, fyacc, fzacc) for (unsigned int j = shift; j < soa2.getNumParticles(); ++j) { const auto ownedStateJ = ownership2ptr[j]; const SoAFloatPrecision drx = x1ptr[i] - x2ptr[j]; const SoAFloatPrecision dry = y1ptr[i] - y2ptr[j]; const SoAFloatPrecision drz = z1ptr[i] - z2ptr[j]; const SoAFloatPrecision drx2 = drx * drx; const SoAFloatPrecision dry2 = dry * dry; const SoAFloatPrecision drz2 = drz * drz; const SoAFloatPrecision dr2 = drx2 + dry2 + drz2; const SoAFloatPrecision dr = sqrt(dr2); const SoAFloatPrecision radI = rad1ptr[i]; const SoAFloatPrecision radJ = rad2ptr[j]; const SoAFloatPrecision rad = radI + radJ; // Mask away if particles arent intersecting or if j is dummy. // Particle ownedStateI was already checked previously. const bool mask = dr <= rad and ownedStateJ != OwnershipState::dummy; const SoAFloatPrecision poissonI = poisson1ptr[i]; const SoAFloatPrecision youngI = young1ptr[i]; const SoAFloatPrecision poissonJ = poisson2ptr[j]; const SoAFloatPrecision youngJ = young2ptr[j]; const SoAFloatPrecision poissonI2 = poissonI * poissonI; const SoAFloatPrecision poissonJ2 = poissonJ * poissonJ; const SoAFloatPrecision e1 = (1. - poissonI2)/youngI; const SoAFloatPrecision e2 = (1. - poissonJ2)/youngJ; const SoAFloatPrecision esum = e1 + e2; const SoAFloatPrecision e = 1. / esum; const SoAFloatPrecision penDepth = rad - dr; const SoAFloatPrecision r = radI * radJ / rad; const SoAFloatPrecision fac = mask ? 4/3 * e * sqrt(r) * pow(penDepth, 3./2.) 
/ dr : 0.; const SoAFloatPrecision fx = drx * fac; const SoAFloatPrecision fy = dry * fac; const SoAFloatPrecision fz = drz * fac; fxacc += fx; fyacc += fy; fzacc += fz; if(newton3){ fx2ptr[j] -=fx; fy2ptr[j] -=fy; fz2ptr[j] -=fz; } } fx1ptr[i] += fxacc; fy1ptr[i] += fyacc; fz1ptr[i] += fzacc; } } /** * @copydoc Functor::SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) */ void SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) final { SoAFunctorCalc(soa, soa, true, newton3); } /** * @copydoc Functor::SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) */ void SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, const bool newton3) final { if (newton3) { SoAFunctorPairImpl<true>(soa1, soa2); } else { SoAFunctorPairImpl<false>(soa1, soa2); } } private: /** * Implementation function of SoAFunctorPair(soa1, soa2, newton3) * * @tparam newton3 * @param soa1 * @param soa2 */ template <bool newton3> void SoAFunctorPairImpl(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2) { SoAFunctorCalc(soa1, soa2, false, newton3); } public: // clang-format off /** * @copydoc Functor::SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3) */ // clang-format on void SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3) final { if (soa.getNumParticles() == 0 or neighborList.empty()) return; if (newton3) { SoAFunctorVerletImpl<true>(soa, indexFirst, neighborList); } else { SoAFunctorVerletImpl<false>(soa, indexFirst, neighborList); } } /** * @copydoc Functor::getNeededAttr() */ constexpr static auto getNeededAttr() { return std::array<typename Particle::AttributeNames, 12>{ Particle::AttributeNames::id, Particle::AttributeNames::posX, Particle::AttributeNames::posY, Particle::AttributeNames::posZ, 
Particle::AttributeNames::forceX, Particle::AttributeNames::forceY,
        Particle::AttributeNames::forceZ, Particle::AttributeNames::rad,
        Particle::AttributeNames::poisson, Particle::AttributeNames::young,
        Particle::AttributeNames::typeId, Particle::AttributeNames::ownershipState};
  }

  /**
   * @copydoc Functor::getNeededAttr(std::false_type)
   */
  constexpr static auto getNeededAttr(std::false_type) {
    // non-newton3 variant: force attributes of the second SoA are not needed
    return std::array<typename Particle::AttributeNames, 9>{
        Particle::AttributeNames::id, Particle::AttributeNames::posX,
        Particle::AttributeNames::posY, Particle::AttributeNames::posZ,
        Particle::AttributeNames::rad, Particle::AttributeNames::poisson,
        Particle::AttributeNames::young, Particle::AttributeNames::typeId,
        Particle::AttributeNames::ownershipState};
  }

  /**
   * @copydoc Functor::getComputedAttr()
   */
  constexpr static auto getComputedAttr() {
    // this functor only writes forces
    return std::array<typename Particle::AttributeNames, 3>{
        Particle::AttributeNames::forceX, Particle::AttributeNames::forceY, Particle::AttributeNames::forceZ};
  }

  /**
   * Get the number of flops used per kernel call. This should count the
   * floating point operations needed for two particles that lie within a cutoff
   * radius.
* @return the number of floating point operations */ static unsigned long getNumFlopsPerKernelCall() { // Kernel: 12 = 1 (inverse R squared) + 8 (compute scale) + 3 (apply // scale) sum Forces: 6 (forces) kernel total = 12 + 6 = 18 return 18ul; } void initTraversal() final { _postProcessed = false; } void endTraversal(bool newton3) final { if (_postProcessed) { throw utils::ExceptionHandler::AutoPasException( "Already postprocessed, endTraversal(bool newton3) was called twice without calling initTraversal()."); } } private: template <bool newton3> void SoAFunctorVerletImpl(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList) { const auto *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>(); const auto *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>(); const auto *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>(); auto *const __restrict fxptr = soa.template begin<Particle::AttributeNames::forceX>(); auto *const __restrict fyptr = soa.template begin<Particle::AttributeNames::forceY>(); auto *const __restrict fzptr = soa.template begin<Particle::AttributeNames::forceZ>(); auto *const __restrict radptr = soa.template begin<Particle::AttributeNames::rad>(); auto *const __restrict poissonptr = soa.template begin<Particle::AttributeNames::poisson>(); auto *const __restrict youngptr = soa.template begin<Particle::AttributeNames::young>(); const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>(); SoAFloatPrecision fxacc = 0; SoAFloatPrecision fyacc = 0; SoAFloatPrecision fzacc = 0; const size_t neighborListSize = neighborList.size(); const size_t *const __restrict neighborListPtr = neighborList.data(); // checks whether particle i is owned. 
const auto ownedStateI = ownedStatePtr[indexFirst]; if (ownedStateI == OwnershipState::dummy) { return; } // this is a magic number, that should correspond to at least // vectorization width*N have testet multiple sizes: // 4: does not give a speedup, slower than original AoSFunctor // 8: small speedup compared to AoS // 12: highest speedup compared to Aos // 16: smaller speedup // in theory this is a variable, we could auto-tune over... #ifdef __AVX512F__ // use a multiple of 8 for avx constexpr size_t vecsize = 16; #else // for everything else 12 is faster constexpr size_t vecsize = 12; #endif size_t joff = 0; // if the size of the verlet list is larger than the given size vecsize, // we will use a vectorized version. if (neighborListSize >= vecsize) { alignas(64) std::array<SoAFloatPrecision, vecsize> xtmp, ytmp, ztmp, radtmp, poissontmp, youngtmp, xArr, yArr, zArr, radArr, poissonArr, youngArr, fxArr, fyArr, fzArr; alignas(64) std::array<OwnershipState, vecsize> ownedStateArr{}; // broadcast of the position of particle i for (size_t tmpj = 0; tmpj < vecsize; tmpj++) { xtmp[tmpj] = xptr[indexFirst]; ytmp[tmpj] = yptr[indexFirst]; ztmp[tmpj] = zptr[indexFirst]; radtmp[tmpj] = radptr[indexFirst]; poissontmp[tmpj] = poissonptr[indexFirst]; youngtmp[tmpj] = youngptr[indexFirst]; } // loop over the verlet list from 0 to x*vecsize for (; joff < neighborListSize - vecsize + 1; joff += vecsize) { // in each iteration we calculate the interactions of particle i with // vecsize particles in the neighborlist of particle i starting at // particle joff // gather position of particle j #pragma omp simd safelen(vecsize) for (size_t tmpj = 0; tmpj < vecsize; tmpj++) { xArr[tmpj] = xptr[neighborListPtr[joff + tmpj]]; yArr[tmpj] = yptr[neighborListPtr[joff + tmpj]]; zArr[tmpj] = zptr[neighborListPtr[joff + tmpj]]; radArr[tmpj] = radptr[neighborListPtr[joff + tmpj]]; poissonArr[tmpj] = poissonptr[neighborListPtr[joff + tmpj]]; youngArr[tmpj] = youngptr[neighborListPtr[joff + 
tmpj]]; ownedStateArr[tmpj] = ownedStatePtr[neighborListPtr[joff + tmpj]]; } // do omp simd with reduction of the interaction #pragma omp simd reduction(+ : fxacc, fyacc, fzacc) safelen(vecsize) for (size_t j = 0; j < vecsize; j++) { const auto ownedStateJ = ownedStateArr[j]; const SoAFloatPrecision drx = xtmp[j] - xArr[j]; const SoAFloatPrecision dry = ytmp[j] - yArr[j]; const SoAFloatPrecision drz = ztmp[j] - zArr[j]; const SoAFloatPrecision drx2 = drx * drx; const SoAFloatPrecision dry2 = dry * dry; const SoAFloatPrecision drz2 = drz * drz; const SoAFloatPrecision dr2 = drx2 + dry2 + drz2; const SoAFloatPrecision dr = sqrt(dr2); const SoAFloatPrecision radI = radtmp[j]; const SoAFloatPrecision radJ = radArr[j]; const SoAFloatPrecision rad = radI + radJ; // Mask away if particles arent intersecting or if j is dummy. const bool mask = dr <= rad and ownedStateJ != OwnershipState::dummy and dr != 0; const SoAFloatPrecision poissonI = poissontmp[j]; const SoAFloatPrecision youngI = youngtmp[j]; const SoAFloatPrecision poissonJ = poissonArr[j]; const SoAFloatPrecision youngJ = youngArr[j]; const SoAFloatPrecision poissonI2 = poissonI * poissonI; const SoAFloatPrecision poissonJ2 = poissonJ * poissonJ; const SoAFloatPrecision e1 = (1. - poissonI2)/youngI; const SoAFloatPrecision e2 = (1. - poissonJ2)/youngJ; const SoAFloatPrecision esum = e1 + e2; const SoAFloatPrecision e = 1. / esum; const SoAFloatPrecision penDepth = rad - dr; const SoAFloatPrecision r = radI * radJ / rad; const SoAFloatPrecision fac = mask ? 4/3 * e * sqrt(r) * pow(penDepth, 3./2.) 
/ dr : 0.; const SoAFloatPrecision fx = drx * fac; const SoAFloatPrecision fy = dry * fac; const SoAFloatPrecision fz = drz * fac; fxacc += fx; fyacc += fy; fzacc += fz; if (newton3) { fxArr[j] = fx; fyArr[j] = fy; fzArr[j] = fz; } } // scatter the forces to where they belong, this is only needed for newton3 if (newton3) { #pragma omp simd safelen(vecsize) for (size_t tmpj = 0; tmpj < vecsize; tmpj++) { const size_t j = neighborListPtr[joff + tmpj]; fxptr[j] -= fxArr[tmpj]; fyptr[j] -= fyArr[tmpj]; fzptr[j] -= fzArr[tmpj]; } } } } // this loop goes over the remainder and uses no optimizations for (size_t jNeighIndex = joff; jNeighIndex < neighborListSize; ++jNeighIndex) { size_t j = neighborList[jNeighIndex]; if (indexFirst == j) continue; const auto ownedStateJ = ownedStatePtr[j]; if (ownedStateJ == OwnershipState::dummy) { continue; } const SoAFloatPrecision drx = xptr[indexFirst] - xptr[j]; const SoAFloatPrecision dry = yptr[indexFirst] - yptr[j]; const SoAFloatPrecision drz = zptr[indexFirst] - zptr[j]; const SoAFloatPrecision drx2 = drx * drx; const SoAFloatPrecision dry2 = dry * dry; const SoAFloatPrecision drz2 = drz * drz; const SoAFloatPrecision dr2 = drx2 + dry2 + drz2; const SoAFloatPrecision dr = sqrt(dr2); const SoAFloatPrecision radI = radptr[indexFirst]; const SoAFloatPrecision radJ = radptr[j]; const SoAFloatPrecision rad = radI + radJ; if(dr >= rad | dr == 0) {continue;} const SoAFloatPrecision poissonI = poissonptr[indexFirst]; const SoAFloatPrecision youngI = youngptr[indexFirst]; const SoAFloatPrecision poissonJ = poissonptr[j]; const SoAFloatPrecision youngJ = youngptr[j]; const SoAFloatPrecision poissonI2 = poissonI * poissonI; const SoAFloatPrecision poissonJ2 = poissonJ * poissonJ; const SoAFloatPrecision e1 = (1. - poissonI2)/youngI; const SoAFloatPrecision e2 = (1. - poissonJ2)/youngJ; const SoAFloatPrecision esum = e1 + e2; const SoAFloatPrecision e = 1. 
/ esum; const SoAFloatPrecision penDepth = rad - dr; const SoAFloatPrecision r = radI * radJ / rad; const SoAFloatPrecision fac = 4/3 * e * sqrt(r) * pow(penDepth, 3./2.) / dr; const SoAFloatPrecision fx = drx * fac; const SoAFloatPrecision fy = dry * fac; const SoAFloatPrecision fz = drz * fac; fxacc += fx; fyacc += fy; fzacc += fz; if (newton3) { fxptr[j] -= fx; fyptr[j] -= fy; fzptr[j] -= fz; } } if (fxacc != 0 or fyacc != 0 or fzacc != 0) { fxptr[indexFirst] += fxacc; fyptr[indexFirst] += fyacc; fzptr[indexFirst] += fzacc; } } // defines whether or whether not the global values are already preprocessed bool _postProcessed; }; } // namespace autopas
newton_parallel_mp.c
#include <math.h> #include <complex.h> #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "timer.h" #define pi 3.14159265359 #define THREADS 16 int main(void) { int MaxCount = 1000; int xMin = -2; int xMax = 2; int yMin = -2; int yMax = 2; int steps = 5000; //NOTE: Runs steps^2 steps, represents steps done in real and imaginary axes float Tol = .0001; double complex r1 = 1 + 0*I; double complex r2 = -0.5 + sin(2 * pi / 3)*I; double complex r3 = -0.5 - sin(2 * pi / 3)*I; int points[4] = { 0 }; StartTimer(); omp_set_num_threads(THREADS); //#pragma omp parallel for default(shared) num_threads(THREADS) reduction(+:points) schedule(static,steps/THREADS) #pragma omp parallel for default(shared) num_threads(THREADS) reduction(+:points) schedule(dynamic) for (int y = 0; y < steps; y++) { for (int x = 0; x < steps; x++) { double complex z = (xMin + (xMax - xMin) * 1.0 * x / (steps - 1)) + (yMin + (yMax - yMin) * 1.0 * y / (steps - 1)) * I; int count = 0; while ((count < MaxCount) && cabs(z - r1) >= Tol && cabs(z - r2) >= Tol && cabs(z - r3) >= Tol) { if (cabs(z) > 0) { z = z - (z*z*z - 1.0) / (z*z*3.0); //change fraction to desired function divided by its derivative to change fractal function } count++; } if (cabs(z - r1) < Tol && abs(cimag(z)) < Tol) { points[1]++; } else if (cabs(z - r2) <= Tol && cimag(z) > -Tol) { points[2]++; } else if (cabs(z - r3) <= Tol && cimag(z) < Tol) { points[3]++; } else { points[0]++; } } } // Code below is a version of the above loop without the nested for loops. Ultimately ran worse for me on bridges2. 
/* #pragma omp parallel for default(shared) num_threads(THREADS) reduction(+:points) schedule(dynamic) for (int k = 0; k < (steps * steps); k++) { int y = k / steps; int x = k % steps; double complex z = (xMin + (xMax - xMin) * 1.0 * x / (steps - 1)) + (yMin + (yMax - yMin) * 1.0 * y / (steps - 1)) * I; int count = 0; while ((count < MaxCount) && cabs(z - r1) >= Tol && cabs(z - r2) >= Tol && cabs(z - r3) >= Tol) { if (cabs(z) > 0) { z = z - (z*z*z - 1.0) / (z*z*3.0); } count++; } if (cabs(z - r1) < Tol && abs(cimag(z)) < Tol) { points[1]++; } else if (cabs(z - r2) <= Tol && cimag(z) > -Tol) { points[2]++; } else if (cabs(z - r3) <= Tol && cimag(z) < Tol) { points[3]++; } else { points[0]++; } } */ double runtime = GetTimer(); printf("Newton Fractal for %d points:\n", steps * steps); printf("Points that converged to no roots : %d (%.2f%%)\n", points[0], 100.0 * points[0] / (steps * steps)); printf("Points that converged to root %.2f + %.2fi: %d (%.2f%%)\n", creal(r1), cimag(r1), points[1], 100.0 * points[1] / (steps * steps)); printf("Points that converged to root %.2f + %.2fi: %d (%.2f%%)\n", creal(r2), cimag(r2), points[2], 100.0 * points[2] / (steps * steps)); printf("Points that converged to root %.2f + %.2fi: %d (%.2f%%)\n", creal(r3), cimag(r3), points[3], 100.0 * points[3] / (steps * steps)); printf("Time taken: %f s\n", runtime / 1000); }
opencl_strip_fmt_plug.c
/* STRIP Password Manager cracker patch for JtR. Hacked together during * September of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_strip; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_strip); #else #include <string.h> #include <openssl/aes.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "options.h" #include "common.h" #include "misc.h" #include "common-opencl.h" #define FORMAT_LABEL "strip-opencl" #define FORMAT_NAME "STRIP Password Manager" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN 1 #define SALT_ALIGN MEM_ALIGN_WORD #define uint8_t unsigned char #define uint16_t unsigned short #define uint32_t ARCH_WORD_32 #define ITERATIONS 4000 #define FILE_HEADER_SZ 16 #define SQLITE_FILE_HEADER "SQLite format 3" #define HMAC_SALT_MASK 0x3a #define FAST_PBKDF2_ITER 2 #define SQLITE_MAX_PAGE_SIZE 65536 static struct fmt_tests strip_tests[] = { /* test vector created by STRIP for Windows */ 
{"$strip$*66cd7a4ff7716f7b86cf587ce18eb39518e096eb152615ada8d007d9f035c20c711e62cbde96d8c3aad2a4658497a6119addc97ed3c970580cd666f301c63ce041a1748ee5c3861ada3cd6ee75b5d68891f731b3c2e3294b08e10ce3c23c2bfac158f8c45d0332791f64d1e3ad55e936d17a42fef5228e713b8188050c9a61c7f026af6203172cf2fc54c8b439e2260d7a00a4156713f92f8466de5c05cd8701e0d3d9cb3f392ae918e6900d5363886d4e1ed7e90da76b180ef9555c1cd358f6d1ee3755a208fee4d5aa1c776a0888200b21a3da6614d5fe2303e78c09563d862d19deecdc9f0ec7fbc015689a74f4eb477d9f22298b1b3f866ca4cb772d74821a1f8d03fd5fd0d020ffd41dd449b431ddf3bbfba3399311d9827be428202ee56e2c2a4e91f3415b4282c691f16cd447cf877b576ab963ea4ea3dc7d8c433febdc36607fd2372c4165abb59e3e75c28142f1f2575ecca6d97a9f782c3410151f8bbcbc65a42fdc59fdc4ecd8214a2bbd3a4562fac21c48f7fc69a4ecbcf664b4e435d7734fde5494e4d80019a0302e22565ed6a49b29cecf81077fd92f0105d18a421e04ee0deaca6389214abc7182db7003da7e267816531010b236eadfea20509718ff743ed5ad2828b6501dd84a371feed26f0514bbda69118a69048ebb71e3e2c54fb918422f1320724a353fe8d81a562197454d2c67443be8a4008a756aec0998386a5fd48e379befe966b42dfa6684ff049a61b51de5f874a12ab7d9ab33dc84738e036e294c22a07bebcc95be9999ab988a1fa1c944ab95be970045accb661249be8cc34fcc0680cb1aff8dfee21f586c571b1d09bf370c6fc131418201e0414acb2e4005b0b6fda1f3d73b7865823a008d1d3f45492a960dbdd6331d78d9e2e6a368f08ee3456b6d78df1d5630f825c536fff60bad23fb164d151d80a03b0c78edbfdee5c7183d7527e289428cf554ad05c9d75011f6b233744f12cd85fbb62f5d1ae22f43946f24a483a64377bf3fa16bf32cea1ab4363ef36206a5989e97ff847e5d645791571b9ecd1db194119b7663897b9175dd9cc123bcc7192eaf56d4a2779c502700e88c5c20b962943084bcdf024dc4f19ca649a860bdbd8f8f9b4a9d03027ae80f4a3168fc030859acb08a871950b024d27306cdc1a408b2b3799bb8c1f4b6ac3593aab42c962c979cd9e6f59d029f8d392315830cfcf4066bf03e0fc5c0f3630e9c796ddb38f51a2992b0a61d6ef115cb34d36c7d94b6c9d49dfe8d064d92b483f12c14fa10bf1170a575e4571836cef0a1fbf9f8b6968abda5e964bb16fd62fde1d1df0f5ee9c68ce568014f46f1717b6cd948b0da9a6f4128da338960dbbcbc9c9c3b486859c06e5e2338db3458646054ccd59bb940c7fc60
cda34f633c26dde83bb717b75fefcbd09163f147d59a6524752a47cd94", "openwall"}, /* test vector created by STRIP Password Manager (for Android) */ {"$strip$*78adb0052203efa1bd1b02cac098cc9af1bf7e84ee2eaebaaba156bdcfe729ab12ee7ba8a84e79d11dbd67eee82bcb24be99dbd5db7f4c3a62f188ce4b48edf4ebf6cbf5a5869a61f83fbdb3cb4bf79b3c2c898f422d71eab31afdf3a8d4e97204dedbe7bd8b5e4c891f4880ca917c8b2f67ca06035e7f8db1fae91c45db6a08adf96ec5ddcb9e60b648acf883a7550ea5b67e2d27623e8de315f29cba48b8b1d1bde62283615ab88293b29ad73ae404a42b13e35a95770a504d81e335c00328a6290e411fa2708a697fab7c2d17ff5d0a3fe508118bb43c3d5e72ef563e0ffd337f559085a1373651ca2b8444f4437d8ac0c19aa0a24b248d1d283062afbc3b4ccc9b1861f59518eba771f1d9707affe0222ff946da7c014265ab4ba1f6417dd22d92e4adf5b7e462588f0a42e061a3dad041cbb312d8862aed3cf490df50b710a695517b0c8771a01f82db09231d392d825f5667012e349d2ed787edf8448bbb1ff548bee3a33392cd209e8b6c1de8202f6527d354c3858b5e93790c4807a8967b4c0321ed3a1d09280921650ac33308bd04f35fb72d12ff64a05300053358c5d018a62841290f600f7df0a7371b6fac9b41133e2509cb90f774d02e7202185b9641d063ed38535afb81590bfd5ad9a90107e4ff6d097ac8f35435f307a727f5021f190fc157956414bfce4818a1e5c6af187485683498dcc1d56c074c534a99125c6cfbf5242087c6b0ae10971b0ff6114a93616e1a346a22fcac4c8f6e5c4a19f049bbc7a02d2a31d39548f12440c36dbb253299a11b630e8fd88e7bfe58545d60dce5e8566a0a190d816cb775bd859b8623a7b076bce82c52e9cff6a2d221f9d3fd888ac30c7e3000ba8ed326881ffe911e27bb8982b56caa9a12065721269976517d2862e4a486b7ed143ee42c6566bba04c41c3371220f4843f26e328c33a5fb8450dadc466202ffc5c49cc95827916771e49e0602c3f8468537a81cf2fa1db34c090fccab6254436c05657cf29c3c415bb22a42adeac7870858bf96039b81c42c3d772509fdbe9a94eaf99ee9c59bac3ea97da31e9feac14ed53a0af5c5ebd2e81e40a5140da4f8a44048d5f414b0ba9bfb8024c7abaf5346fde6368162a045d1196f81d55ed746cc6cbd7a7c9cdbfa392279169626437da15a62730c2990772e106a5b84a60edaa6c5b8030e1840aa6361f39a12121a1e33b9e63fb2867d6241de1fb6e2cd1bd9a78c7122258d052ea53a4bff4e097ed49fc17b9ec196780f4c6506e74a5abb10c2545e6f7608d2eefad179d54ad31034576
be517affeb3964c65562538dd6ea7566a52c75e4df593895539609a44097cb6d31f438e8f7717ce2bf777c76c22d60b15affeb89f08084e8f316be3f4aefa4fba8ec2cc1dc845c7affbc0ce5ebccdbfde5ebab080a285f02bdfb76c6dbd243e5ee1e5d", "p@$$w0rD"}, {NULL} }; #define OCL_CONFIG "strip" typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } strip_password; typedef struct { uint32_t v[32/4]; } strip_hash; typedef struct { uint8_t length; uint8_t salt[20]; int iterations; int outlen; } strip_salt; static int *cracked; static int any_cracked; static struct custom_salt { unsigned char salt[16]; unsigned char data[1024]; } *cur_salt; static cl_int cl_error; static strip_password *inbuffer; static strip_hash *outbuffer; static strip_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define OCL_CONFIG "strip" #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 
8 : 1; else return 64; } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(strip_password) * gws; outsize = sizeof(strip_hash) * gws; settingsize = sizeof(strip_salt); cracked_size = sizeof(*cracked) * gws; inbuffer = mem_calloc(insize); outbuffer = mem_alloc(outsize); cracked = mem_calloc(cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); } static void init(struct fmt_main *self) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], 
"derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(strip_password), 0); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 1000); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; if (strncmp(ciphertext, "$strip$", 7)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 7; if ((p = strtok(ctcopy, "*")) == NULL) /* salt + data */ goto err; if (strlen(p) != 2048) goto err; if (!ishex(p)) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; ctcopy += 7; /* skip over "$strip$" */ p = strtok(ctcopy, "*"); for (i = 0; i < 16; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; for (; i < 1024; i++) cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; memcpy((char*)currentsalt.salt, cur_salt->salt, 16); currentsalt.length = 16; currentsalt.iterations = ITERATIONS; currentsalt.outlen = 32; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, 
length); ret[length] = '\0'; return ret; } /* verify validity of page */ static int verify_page(unsigned char *page1) { uint32_t pageSize; uint32_t usableSize; if (memcmp(page1, SQLITE_FILE_HEADER, 16) != 0) { return -1; } if (page1[19] > 2) { return -1; } if (memcmp(&page1[21], "\100\040\040", 3) != 0) { return -1; } pageSize = (page1[16] << 8) | (page1[17] << 16); if (((pageSize - 1) & pageSize) != 0 || pageSize > SQLITE_MAX_PAGE_SIZE || pageSize <= 256) { return -1; } if ((pageSize & 7) != 0) { return -1; } usableSize = pageSize - page1[20]; if (usableSize < 480) { return -1; } return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index; global_work_size = (((count + local_work_size - 1) / local_work_size) * local_work_size); if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } /// Copy data to gpu HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char master[32]; unsigned char output[1024]; unsigned char *iv_in; unsigned char iv_out[16]; int size; int page_sz = 1008; /* 1024 - strlen(SQLITE_FILE_HEADER) */ int reserve_sz = 16; /* for HMAC off case */ AES_KEY akey; memcpy(master, outbuffer[index].v, 32); memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ); size = page_sz - reserve_sz; iv_in = cur_salt->data + size + 16; memcpy(iv_out, iv_in, 16); if (AES_set_decrypt_key(master, 256, &akey) < 0) { fprintf(stderr, "AES_set_decrypt_key failed!\n"); } /* decrypting 24 bytes 
is enough */ AES_cbc_encrypt(cur_salt->data + 16, output + 16, 24, &akey, iv_out, AES_DECRYPT); if (verify_page(output) == 0) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_strip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif strip_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
SE_fg_grid_split_thrd_mex.c
// MEX gateway for the threaded fast-Gaussian-gridding (FGG) spreading step of
// the Spectral Ewald method: spreads N point charges onto a 3D grid H.
//
// MATLAB call (positional inputs, see the #defines below):
//   H = SE_fg_grid_split_thrd_mex(x, q, opt, zs, zx, zy, zz, idx)
#include "mex.h"
#include "SE_fgg.h"
#include "fgg_thrd.h"

// Parses the MATLAB options struct OPT into an SE_FGG_params (defined in the
// SE_fgg library; implementation lives elsewhere in the project).
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);

// Positional right-hand-side arguments.
#define X prhs[0]      // particle coordinates (N x 3, presumably -- TODO confirm against caller)
#define Q prhs[1]      // charges (N x 1)
#define OPT prhs[2]    // options struct
#define ZS prhs[3]     // pre-computed Gaussian factors
#define ZX prhs[4]
#define ZY prhs[5]
#define ZZ prhs[6]
#define IDX prhs[7]    // pre-computed grid indices (int32 data, cast below)

#define H_OUT plhs[0]  // Output

#ifndef VERBOSE
#define VERBOSE 0
#endif

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // Number of particles = number of rows of X.
    // NOTE(review): mxGetM returns mwSize; storing it in int truncates for
    // very large N -- confirm the expected problem sizes.
    const int N = mxGetM(X);
    double* restrict x = mxGetPr(X);
    double* restrict q = mxGetPr(Q);

    // pack parameters
    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    // scratch arrays
    SE_FGG_work work;
    SE_FGG_allocate_workspace(&work, &params, false, false);

    // attach pre-computed quantities (borrowed pointers into MATLAB arrays;
    // not owned by `work`, so they must not be freed by the workspace)
    work.zs = mxGetPr(ZS);
    work.zx = mxGetPr(ZX);
    work.zy = mxGetPr(ZY);
    work.zz = mxGetPr(ZZ);
    work.idx = (int*)mxGetData(IDX);

    // allocate output array and zero it
    // NOTE(review): mxCreateNumericArray expects mwSize dims; size_t matches
    // mwSize only for 64-bit "large array" builds -- verify build flags.
    size_t dims[3] = {params.dims[0], params.dims[1], params.dims[2]};
    H_OUT = mxCreateNumericArray(3, dims, mxDOUBLE_CLASS, mxREAL);
    double* H_per = mxGetPr(H_OUT);
    SE_fp_set_zero(H_per, SE_prod3(params.dims));

    // coordinates and charges
    const SE_state st = {.x = x, .q = q};

    if(VERBOSE)
        mexPrintf("[SE%s FG(g) THRD] N=%d, P=%d\n",PER_STR,N,params.P);

    // now do the work: each OpenMP thread runs the dispatch on its share
    // (thread partitioning is handled inside the *_dispatch routines)
#if FGG_THRD
#pragma omp parallel
#else
#error "Threading must be activated with -DFGG_THRD and OpenMP flag"
#endif
    {
#ifdef __AVX__
        SE_FGG_grid_split_AVX_dispatch(&work, &st, &params);
#else
        SE_FGG_grid_split_SSE_dispatch(&work, &st, &params);
#endif
    }

    // fold the extended grid back into the periodic output grid; which wrap
    // routine applies is selected at compile time by the periodicity macro
#ifdef THREE_PERIODIC
    SE_FGG_wrap_fcn(H_per, &work, &params);
#endif
#ifdef TWO_PERIODIC
    SE2P_FGG_wrap_fcn(H_per, &work, &params);
#endif
#ifdef ONE_PERIODIC
    SE1P_FGG_wrap_fcn(H_per, &work, &params);
#endif

    // done
    SE_FGG_free_workspace(&work);
}
atax_teams.c
/** * atax.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <omp.h> #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.5 /* Problem size. */ #define NX 8192 #define NY 8192 #ifndef M_PI #define M_PI 3.14159 #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *x, DATA_TYPE *A) { int i, j; for (i = 0; i < NX; i++) { x[i] = i * M_PI; for (j = 0; j < NY; j++) { A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX; } } } void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<NY; i++) { if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i,j; for (i= 0; i < NY; i++) { y[i] = 0; } for (i = 0; i < NX; i++) { tmp[i] = 0; for (j = 0; j < NY; j++) { tmp[i] = tmp[i] + A[i*NY + j] * x[j]; } for (j = 0; j < NY; j++) { y[j] = y[j] + A[i*NY + j] * tmp[i]; } } } void atax_OMP(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i,j; for (i= 0; i < NY; i++) { y[i] = 0; } #pragma omp target teams distribute parallel for map(to:A[:NX*NY], x[:NY]) map(from: tmp[:NX]) for (i = 0; i < NX; i++) { tmp[i] = 0; int j; for (j = 0; j < NY; j++) { tmp[i] = tmp[i] + A[i*NY + j] * x[j]; } } #pragma omp target teams 
distribute parallel for map(to:A[:NX*NY], tmp[:NX]) map(from: y[:NY]) for (j = 0; j < NY; j++) for (i = 0; i < NX; i++){ { y[j] = y[j] + A[i*NY + j] * tmp[i]; } } } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE)); fprintf(stdout, "<< Matrix Transpose and Vector Multiplication >>\n"); init_array(x, A); t_start = rtclock(); atax_OMP(A, x, y_outputFromGpu, tmp); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); atax_cpu(A, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; }
GB_unaryop__ainv_int16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Edit the templates in Generator/* instead and regenerate.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_int16_uint32
// op(A') function: GB_tran__ainv_int16_uint32

// C type: int16_t
// A type: uint32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse)
#define GB_OP(z, x) \
    z = -x ;

// casting (uint32_t value truncated/wrapped into int16_t before negation)
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = -(int16_t) Ax [p] over all anz entries, statically
// scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__ainv_int16_uint32
(
    int16_t *restrict Cx,           // output array, anz entries
    const uint32_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by GB_unaryop_transpose.c, included below with the
// operator macros defined above; A_slice partitions A across naslice tasks.
GrB_Info GB_tran__ainv_int16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mtSpGEMM.h
#ifndef _mtSpGEMM_h
#define _mtSpGEMM_h

#include "CombBLAS.h"

namespace combblas {

/*
 Multithreaded prefix sum
 Inputs:
    in: an input array
    size: the length of the input array "in"
    nthreads: number of threads used to compute the prefix sum

 Output:
    return an array of size "size+1"
    the memory of the output array is allocated internally; caller deletes it
    with delete []

 Example:
    in  = [2, 1, 3, 5]
    out = [0, 2, 3, 6, 11]
 */
template <typename T>
T* prefixsum(T* in, int size, int nthreads)
{
    std::vector<T> tsum(nthreads+1);  // per-thread partial sums; tsum[t+1] = sum of thread t's chunk
    tsum[0] = 0;
    T* out = new T[size+1];
    out[0] = 0;
    T* psum = &out[1];
#ifdef THREADED
#pragma omp parallel
#endif
    {
        int ithread = 0;
#ifdef THREADED
        ithread = omp_get_thread_num();
#endif
        // Pass 1: each thread computes a local (per-chunk) inclusive scan.
        T sum = 0;
#ifdef THREADED
#pragma omp for schedule(static)
#endif
        for (int i=0; i<size; i++)
        {
            sum += in[i];
            psum[i] = sum;
        }
        tsum[ithread+1] = sum;
#ifdef THREADED
#pragma omp barrier
#endif
        // Pass 2: add the total of all preceding threads' chunks.  The static
        // schedule guarantees each thread revisits the same chunk as in pass 1.
        T offset = 0;
        for(int i=0; i<(ithread+1); i++)
        {
            offset += tsum[i];
        }
#ifdef THREADED
#pragma omp for schedule(static)
#endif
        for (int i=0; i<size; i++)
        {
            psum[i] += offset;
        }
    }
    return out;
}

// multithreaded HeapSpGEMM: C = A*B over semiring SR, heap-based column merge.
// If clearA/clearB is set, the corresponding input is deleted before returning.
// Caller owns the returned SpTuples.
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalSpGEMM
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB)
{
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    IT nA = A.getncol();
    float cf  = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    IT * aux;
    Adcsc->ConstructAux(nA, aux);

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    // Symbolic phase: exact per-column nnz counts, then a prefix sum so each
    // column of C gets a disjoint slice of the output tuple buffer.
    IT* colnnzC = estimateNNZ(A, B, aux,false); // don't free aux
    IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
    delete [] colnnzC;
    IT nnzc = colptrC[Bdcsc->nzc];
    // raw storage: entries are written exactly once, so no default construction
    std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector<HeapEntry<IT,NT1>>> globalheapVec(numThreads);

    for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
        globalheapVec[i].resize(nnzA/numThreads);
    }

    size_t Bnzc = (size_t) Bdcsc->nzc;
#ifdef THREADED
#pragma omp parallel for
#endif
    for(size_t i=0; i < Bnzc; ++i)  // one nonempty column of B (hence of C) per iteration
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
            globalheapVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index one past the last valid element of that column of A)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();
        HeapEntry<IT,NT1> * wset = globalheapVec[myThread].data();
        IT hsize = 0;

        for(size_t j = 0; j < nnzcolB; ++j)  // create the initial heap
        {
            if(colinds[j].first != colinds[j].second)  // current != end
            {
                wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]);
            }
        }
        std::make_heap(wset, wset+hsize);

        IT curptr = colptrC[i];  // write cursor into this column's slice of tuplesC
        while(hsize > 0)
        {
            std::pop_heap(wset, wset + hsize);  // result is stored in wset[hsize-1]
            IT locb = wset[hsize-1].runr;  // relative location of the nonzero in B's current column

            NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]);
            if (!SR::returnedSAID())
            {
                // merge into the previous tuple if it has the same row index,
                // otherwise append a new tuple
                if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key)
                {
                    std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs);
                }
                else
                {
                    tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ;
                }
            }

            if( (++(colinds[locb].first)) != colinds[locb].second)  // current != end
            {
                // runr stays the same !
                wset[hsize-1].key = Adcsc->ir[colinds[locb].first];
                wset[hsize-1].num = Adcsc->numx[colinds[locb].first];
                std::push_heap(wset, wset+hsize);
            }
            else
            {
                --hsize;
            }
        }
    }

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);

    delete [] colptrC;
    delete [] aux;

    SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, true);
    return spTuplesC;
}

// comparator: order (index, value) pairs by index (used to sort hash-table output)
template <typename IT, typename NT>
bool sort_less(const std::pair<IT, NT> &left, const std::pair<IT, NT> &right)
{
    return left.first < right.first;
}

// Hybrid approach of multithreaded HeapSpGEMM and HashSpGEMM: per output
// column, chooses heap-merge (low compression ratio, cr < 2) or hash
// accumulation (high compression ratio).  An optional precomputed aux array
// for A can be passed in; otherwise it is built and freed internally.
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalHybridSpGEMM
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB, IT * aux = nullptr)
{
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    IT nA = A.getncol();
    float cf  = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    //IT * aux;
    bool deleteAux = false;
    if(aux==nullptr)
    {
        deleteAux = true;
        Adcsc->ConstructAux(nA, aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    // std::cout << "numThreads: " << numThreads << std::endl;

    // Symbolic phase: per-column flops, then hash-based exact nnz counts.
    IT* flopC = estimateFLOP(A, B, aux);
    //IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    //IT flop = flopptr[Bdcsc->nzc];
    // std::cout << "FLOP of A * B is " << flop << std::endl;

    IT* colnnzC = estimateNNZ_Hash(A, B, flopC, aux);
    IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    IT flop = flopptr[Bdcsc->nzc];
    IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
    delete [] colnnzC;
    delete [] flopC;
    IT nnzc = colptrC[Bdcsc->nzc];
    //double compression_ratio = (double)flop / nnzc;

    // std::cout << "NNZ of A * B is " << nnzc << std::endl;
    // std::cout << "Compression ratio is " << compression_ratio << std::endl;

    std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));
    //std::tuple<IT,IT,NTO> * tuplesC = new std::tuple<IT,IT,NTO>[nnzc];

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector< std::pair<IT,NTO>>> globalHashVecAll(numThreads);
    std::vector<std::vector< HeapEntry<IT,NT1>>> globalHeapVecAll(numThreads);

    /*
    for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }*/

    // IT hashSelected = 0;

#ifdef THREADED
#pragma omp parallel for
#endif
    for(size_t i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index one past the last valid element of that column of A)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();

        // compression ratio of this column = flops / output nnz
        double cr = static_cast<double>(flopptr[i+1] - flopptr[i]) / (colptrC[i+1] - colptrC[i]);

        if (cr < 2.0) // Heap Algorithm
        {
            if(globalHeapVecAll[myThread].size() < nnzcolB)
                globalHeapVecAll[myThread].resize(nnzcolB);
            //std::vector<HeapEntry<IT,NT1>> globalheapVec(nnzcolB);
            //HeapEntry<IT, NT1> * wset = globalheapVec.data();
            HeapEntry<IT, NT1> * wset = globalHeapVecAll[myThread].data();
            IT hsize = 0;

            for(size_t j = 0; j < nnzcolB; ++j)  // create the initial heap
            {
                if(colinds[j].first != colinds[j].second)  // current != end
                {
                    wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]);
                }
            }
            std::make_heap(wset, wset+hsize);
            IT curptr = colptrC[i];
            while(hsize > 0)
            {
                std::pop_heap(wset, wset + hsize);  // result is stored in wset[hsize-1]
                IT locb = wset[hsize-1].runr;  // relative location of the nonzero in B's current column
                NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]);
                if (!SR::returnedSAID())
                {
                    if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key)
                    {
                        std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs);
                    }
                    else
                    {
                        tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ;
                    }
                }
                if( (++(colinds[locb].first)) != colinds[locb].second)  // current != end
                {
                    // runr stays the same !
                    wset[hsize-1].key = Adcsc->ir[colinds[locb].first];
                    wset[hsize-1].num = Adcsc->numx[colinds[locb].first];
                    std::push_heap(wset, wset+hsize);
                }
                else
                {
                    --hsize;
                }
            }
        } // Finish Heap
        else // Hash Algorithm
        {
            // #pragma omp atomic
            // hashSelected++;
            const IT minHashTableSize = 16;
            const IT hashScale = 107;
            size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
            size_t ht_size = minHashTableSize;
            while(ht_size < nnzcolC) //ht_size is set as 2^n
            {
                ht_size <<= 1;
            }
            if(globalHashVecAll[myThread].size() < ht_size)
                globalHashVecAll[myThread].resize(ht_size);
            //std::vector<HeapEntry<IT,NT1>> globalheapVec(nnzcolB);
            //HeapEntry<IT, NT1> * wset = globalheapVec.data();
            //HeapEntry<IT, NT1> * wset = globalheapVecAll[myThread].data();
            //std::vector< std::pair<IT,NTO>> globalHashVec(ht_size);
            std::pair<IT,NTO>* globalHashVec = globalHashVecAll[myThread].data();

            // Initialize hash tables
            // NOTE(review): -1 is the empty-slot sentinel, which presumes IT is
            // a signed type (or that the all-ones pattern never appears as a
            // valid row index) — confirm against the instantiating callers.
            for(size_t j=0; j < ht_size; ++j)
            {
                globalHashVec[j].first = -1;
            }
            // Multiply and add on Hash table
            for (size_t j=0; j < nnzcolB; ++j)
            {
                IT t_bcol = Bdcsc->ir[Bdcsc->cp[i] + j];
                NT2 t_bval = Bdcsc->numx[Bdcsc->cp[i] + j];
                for (IT k = colinds[j].first; k < colinds[j].second; ++k)
                {
                    NTO mrhs = SR::multiply(Adcsc->numx[k], t_bval);
                    IT key = Adcsc->ir[k];
                    IT hash = (key*hashScale) & (ht_size-1);  // ht_size is a power of two
                    while (1) //hash probing
                    {
                        if (globalHashVec[hash].first == key) //key is found in hash table
                        {
                            globalHashVec[hash].second = SR::add(mrhs, globalHashVec[hash].second);
                            break;
                        }
                        else if (globalHashVec[hash].first == -1) //key is not registered yet
                        {
                            globalHashVec[hash].first = key;
                            globalHashVec[hash].second = mrhs;
                            break;
                        }
                        else //key is not found
                        {
                            hash = (hash+1) & (ht_size-1);  // linear probing
                        }
                    }
                }
            }
            // gather non-zero elements from hash table, and then sort them by row indices
            size_t index = 0;
            for (size_t j=0; j < ht_size; ++j)
            {
                if (globalHashVec[j].first != -1)
                {
                    globalHashVec[index++] = globalHashVec[j];
                }
            }
            //std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NTO>);
            std::sort(globalHashVecAll[myThread].begin(), globalHashVecAll[myThread].begin() + index, sort_less<IT, NTO>);

            IT curptr = colptrC[i];
            for (size_t j=0; j < index; ++j)
            {
                tuplesC[curptr++]= std::make_tuple(globalHashVec[j].first, Bdcsc->jc[i], globalHashVec[j].second);
            }
        }
    }

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);

    delete [] colptrC;
    delete [] flopptr;
    if(deleteAux)
        delete [] aux;

    SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, true);
    // std::cout << "localspgemminfo," << flop << "," << nnzc << "," << compression_ratio << "," << t1-t0 << std::endl;
    // std::cout << hashSelected << ", " << Bdcsc->nzc << ", " << (float)hashSelected / Bdcsc->nzc << std::endl;
    return spTuplesC;
}

// Hybrid approach of multithreaded HeapSpGEMM and HashSpGEMM
// (hash-only variant; if sort==false the output tuples are returned in
// hash-table order and the resulting SpTuples is marked unsorted)
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalSpGEMMHash
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB, bool sort=true)
{
    double t0=MPI_Wtime();
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    IT nA = A.getncol();
    float cf  = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    IT * aux;
    Adcsc->ConstructAux(nA, aux);

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    // std::cout << "numThreads: " << numThreads << std::endl;

    IT* flopC = estimateFLOP(A, B);
    IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    IT flop = flopptr[Bdcsc->nzc];
    // std::cout << "FLOP of A * B is " << flop << std::endl;
    IT* colnnzC = estimateNNZ_Hash(A, B, flopC);
    IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
    delete [] colnnzC;
    delete [] flopC;
    IT nnzc = colptrC[Bdcsc->nzc];
    double compression_ratio = (double)flop / nnzc;

    // std::cout << "NNZ of A * B is " << nnzc << std::endl;
    // std::cout << "Compression ratio is " << compression_ratio << std::endl;

    // std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));
    std::tuple<IT,IT,NTO> * tuplesC = new std::tuple<IT,IT,NTO>[nnzc];

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }

    // IT hashSelected = 0;
#ifdef THREADED
#pragma omp parallel for
#endif
    for(size_t i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index one past the last valid element of that column of A)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();

        // #pragma omp atomic
        // hashSelected++;
        const IT minHashTableSize = 16;
        const IT hashScale = 107;
        size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
        size_t ht_size = minHashTableSize;
        while(ht_size < nnzcolC) //ht_size is set as 2^n
        {
            ht_size <<= 1;
        }
        std::vector< std::pair<IT,NTO>> globalHashVec(ht_size);

        // Initialize hash tables (-1 marks an empty slot; see signedness note
        // on the hybrid variant above)
        for(size_t j=0; j < ht_size; ++j)
        {
            globalHashVec[j].first = -1;
        }
        // Multiply and add on Hash table
        for (size_t j=0; j < nnzcolB; ++j)
        {
            IT t_bcol = Bdcsc->ir[Bdcsc->cp[i] + j];
            NT2 t_bval = Bdcsc->numx[Bdcsc->cp[i] + j];
            for (IT k = colinds[j].first; k < colinds[j].second; ++k)
            {
                NTO mrhs = SR::multiply(Adcsc->numx[k], t_bval);
                IT key = Adcsc->ir[k];
                IT hash = (key*hashScale) & (ht_size-1);
                while (1) //hash probing
                {
                    if (globalHashVec[hash].first == key) //key is found in hash table
                    {
                        globalHashVec[hash].second = SR::add(mrhs, globalHashVec[hash].second);
                        break;
                    }
                    else if (globalHashVec[hash].first == -1) //key is not registered yet
                    {
                        globalHashVec[hash].first = key;
                        globalHashVec[hash].second = mrhs;
                        break;
                    }
                    else //key is not found
                    {
                        hash = (hash+1) & (ht_size-1);
                    }
                }
            }
        }

        if(sort)
        {
            // gather non-zero elements from hash table, and then sort them by row indices
            size_t index = 0;
            for (size_t j=0; j < ht_size; ++j)
            {
                if (globalHashVec[j].first != -1)
                {
                    globalHashVec[index++] = globalHashVec[j];
                }
            }
            std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NTO>);

            IT curptr = colptrC[i];
            for (size_t j=0; j < index; ++j)
            {
                tuplesC[curptr++]= std::make_tuple(globalHashVec[j].first, Bdcsc->jc[i], globalHashVec[j].second);
            }
        }
        else
        {
            // unsorted output: emit in hash-table slot order
            IT curptr = colptrC[i];
            for (size_t j=0; j < ht_size; ++j)
            {
                if (globalHashVec[j].first != -1)
                {
                    tuplesC[curptr++]= std::make_tuple(globalHashVec[j].first, Bdcsc->jc[i], globalHashVec[j].second);
                }
            }
        }
    }

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);

    delete [] colptrC;
    delete [] flopptr;
    delete [] aux;

    // last flag false: tuples are not guaranteed column-major sorted
    SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, false);

    double t1=MPI_Wtime();
    // std::cout << "localspgemminfo," << flop << "," << nnzc << "," << compression_ratio << "," << t1-t0 << std::endl;
    // std::cout << hashSelected << ", " << Bdcsc->nzc << ", " << (float)hashSelected / Bdcsc->nzc << std::endl;
    return spTuplesC;
}

/*
 * Estimates total flops necessary to multiply A and B
 * Then returns the number
 * */
template <typename SR, typename IT, typename NT1, typename NT2>
IT EstimateLocalFLOP
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB)
{
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* flopC = estimateFLOP(A, B);
    IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    IT flop = flopptr[Bdcsc->nzc];  // total = last entry of the prefix sum
    delete [] flopC;

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);

    delete [] flopptr;
    return flop;
}

// estimate space for result of SpGEMM: exact per-column nnz of C, computed by
// a heap-based symbolic merge (counts distinct row indices per column).
// Returns a new IT[Bdcsc->nzc]; caller deletes it with delete [].
template <typename IT, typename NT1, typename NT2>
IT* estimateNNZ(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B, IT * aux = nullptr, bool freeaux = true)
{
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return NULL;
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    float cf  = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    if(aux == nullptr)
    {
        Adcsc->ConstructAux(A.getncol(), aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* colnnzC = new IT[Bdcsc->nzc]; // nnz in every nonempty column of C

#ifdef THREADED
#pragma omp parallel for
#endif
    for(IT i=0; i< Bdcsc->nzc; ++i)
    {
        colnnzC[i] = 0;
    }

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector<std::pair<IT,IT>>> globalheapVec(numThreads);

    for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
        globalheapVec[i].resize(nnzA/numThreads);
    }

#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
            globalheapVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index one past the last valid element of that column of A)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();
        std::pair<IT,IT> * curheap = globalheapVec[myThread].data();
        IT hsize = 0;

        // create the initial heap (min-heap on row index, via std::greater)
        for(IT j = 0; (unsigned)j < nnzcolB; ++j)
        {
            if(colinds[j].first != colinds[j].second)
            {
                curheap[hsize++] = std::make_pair(Adcsc->ir[colinds[j].first], j);
            }
        }
        std::make_heap(curheap, curheap+hsize, std::greater<std::pair<IT,IT>>());

        IT prevRow=-1; // previously popped row from heap
        while(hsize > 0)
        {
            std::pop_heap(curheap, curheap + hsize, std::greater<std::pair<IT,IT>>()); // result is stored in curheap[hsize-1]
            IT locb = curheap[hsize-1].second;

            // count each distinct row index once (duplicates pop consecutively
            // from the min-heap)
            if( curheap[hsize-1].first != prevRow)
            {
                prevRow = curheap[hsize-1].first;
                colnnzC[i] ++;
            }

            if( (++(colinds[locb].first)) != colinds[locb].second) // current != end
            {
                curheap[hsize-1].first = Adcsc->ir[colinds[locb].first];
                std::push_heap(curheap, curheap+hsize, std::greater<std::pair<IT,IT>>());
            }
            else
            {
                --hsize;
            }
        }
    }

    if (freeaux)
        delete [] aux;
    return colnnzC;
}

// estimate space for result of SpGEMM with Hash: exact per-column nnz of C,
// using a per-thread open-addressing table sized by the column's flop count.
// Returns a new IT[Bdcsc->nzc]; caller deletes it with delete [].
template <typename IT, typename NT1, typename NT2>
IT* estimateNNZ_Hash(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B, IT *flopC, IT * aux=nullptr)
{
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return NULL;
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    float cf  = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size

    bool deleteAux = false;
    if(aux==nullptr)
    {
        deleteAux = true;
        Adcsc->ConstructAux(A.getncol(), aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* colnnzC = new IT[Bdcsc->nzc]; // nnz in every nonempty column of C
    /*
#ifdef THREADED
#pragma omp parallel for
#endif
    for(IT i=0; i< Bdcsc->nzc; ++i)
    {
        colnnzC[i] = 0;
    }
    */
    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector< IT>> globalHashVecAll(numThreads);
    /*
    for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }*/

#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i < Bdcsc->nzc; ++i)
    {
        colnnzC[i] = 0;
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index one past the last valid element of that column of A)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();

        // Hash
        const IT minHashTableSize = 16;
        const IT hashScale = 107;

        // Initialize hash tables; table is sized to the column's flop count so
        // every product fits without rehashing
        IT ht_size = minHashTableSize;
        while(ht_size < flopC[i]) //ht_size is set as 2^n
        {
            ht_size <<= 1;
        }
        if(globalHashVecAll[myThread].size() < ht_size) //resize thread private vectors if needed
        {
            globalHashVecAll[myThread].resize(ht_size);
        }
        IT* globalHashVec = globalHashVecAll[myThread].data();
        for(IT j=0; (unsigned)j < ht_size; ++j)
        {
            globalHashVec[j] = -1;
        }

        for (IT j=0; (unsigned)j < nnzcolB; ++j)
        {
            IT t_bcol = Bdcsc->ir[Bdcsc->cp[i] + j];
            for (IT k = colinds[j].first; (unsigned)k < colinds[j].second; ++k)
            {
                IT key = Adcsc->ir[k];
                IT hash = (key*hashScale) & (ht_size-1);
                while (1) //hash probing
                {
                    if (globalHashVec[hash] == key) //key is found in hash table
                    {
                        break;
                    }
                    else if (globalHashVec[hash] == -1) //key is not registered yet
                    {
                        globalHashVec[hash] = key;
                        colnnzC[i] ++;
                        break;
                    }
                    else //key is not found
                    {
                        hash = (hash+1) & (ht_size-1);
                    }
                }
            }
        }
    }

    if(deleteAux)
        delete [] aux;
    return colnnzC;
}

// sampling-based nnz estimation (within SUMMA): min-sketch style estimator
// using nrounds exponential samples per row; returns an estimate of nnz(A*B),
// not an exact count.
template <typename IT, typename NT1, typename NT2>
int64_t estimateNNZ_sampling( const SpDCCols<IT, NT1> &A, const SpDCCols<IT, NT2> &B, int nrounds = 5 )
{
    IT nnzA = A.getnnz();
    if (A.isZero() || B.isZero())
        return 0;

    Dcsc<IT,NT1> *Adcsc = A.GetDCSC();
    Dcsc<IT,NT2> *Bdcsc = B.GetDCSC();

    float lambda = 1.0f;
    float usedmem = 0.0f;
    IT m = A.getnrow();
    IT p = A.getncol();
    float *samples_init, *samples_mid, *samples_final;
    float *colest;

    // samples
    samples_init = (float *) malloc(m * nrounds * sizeof(*samples_init));
    samples_mid = (float *) malloc(p * nrounds * sizeof(*samples_mid));

    int nthds = 1;
#ifdef THREADED
#pragma omp parallel
#endif
    {
        nthds = omp_get_num_threads();
    }

    // draw nrounds exponential samples per row of A
    // NOTE(review): the engine is default-seeded and shared across the inner
    // worksharing loop — presumably reproducibility was intended; confirm.
#ifdef THREADED
#pragma omp parallel
#endif
    {
        std::default_random_engine gen;
        std::exponential_distribution<float> exp_dist(lambda);

#ifdef THREADED
#pragma omp parallel for
#endif
        for (IT i = 0; i < m * nrounds; ++i)
            samples_init[i] = exp_dist(gen);
    }

#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < p * nrounds; ++i)
        samples_mid[i] = std::numeric_limits<float>::max();

    // propagate per-row minima through A: samples_mid[col] = min over rows of A's column
#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < Adcsc->nzc; ++i)
    {
        IT col = Adcsc->jc[i];
        IT beg_mid = col * nrounds;
        for (IT j = Adcsc->cp[i]; j < Adcsc->cp[i + 1]; ++j)
        {
            IT row = Adcsc->ir[j];
            IT beg_init = row * nrounds;
            for (int k = 0; k < nrounds; ++k)
            {
                if (samples_init[beg_init + k] < samples_mid[beg_mid + k])
                    samples_mid[beg_mid + k] = samples_init[beg_init + k];
            }
        }
    }

    free(samples_init);

    samples_final = (float *) malloc(B.getnzc() * nrounds * sizeof(*samples_final));
    colest = (float *) malloc(B.getnzc() * sizeof(*colest));
    float nnzest = 0.0f;

    // propagate minima through B and turn the per-column minima into an
    // estimate: (nrounds-1) / sum_of_minima per output column
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzest)
#endif
    for (IT i = 0; i < Bdcsc->nzc; ++i)
    {
        int tid = 0;
#ifdef THREADED
        tid = omp_get_thread_num();
#endif
        IT beg_final = i * nrounds;
        for (IT k = beg_final; k < beg_final + nrounds; ++k)
            samples_final[k] = std::numeric_limits<float>::max();

        for (IT j = Bdcsc->cp[i]; j < Bdcsc->cp[i + 1]; ++j)
        {
            IT row = Bdcsc->ir[j];
            IT beg_mid = row * nrounds;
            for (int k = 0; k < nrounds; ++k)
            {
                if (samples_mid[beg_mid + k] < samples_final[beg_final + k])
                    samples_final[beg_final + k] = samples_mid[beg_mid + k];
            }
        }

        colest[i] = 0.0f;
        for (IT k = beg_final; k < beg_final + nrounds; ++k)
            colest[i] += samples_final[k];
        colest[i] = static_cast<float>(nrounds - 1) / colest[i];
        nnzest += colest[i];
    }

    free(samples_mid);
    free(samples_final);
    free(colest);

    return static_cast<int64_t>(nnzest);
}

// estimate the number of floating point operations of SpGEMM: returns a new
// IT[Bdcsc->nzc] with the multiply count per nonempty output column; caller
// deletes it with delete [].
template <typename IT, typename NT1, typename NT2>
IT* estimateFLOP(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B, IT * aux = nullptr)
{
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return NULL;
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    float cf  = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    //IT * aux;
    bool deleteAux = false;
    if(aux==nullptr)
    {
        deleteAux = true;
        Adcsc->ConstructAux(A.getncol(), aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* colflopC = new IT[Bdcsc->nzc]; // flop in every nonempty column of C

#ifdef THREADED
#pragma omp parallel for
#endif
    for(IT i=0; i< Bdcsc->nzc; ++i)
    {
        colflopC[i] = 0;
    }

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    /*
    for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }*/

#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index one past the last valid element of that column of A)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        for (IT j = 0; (unsigned)j < nnzcolB; ++j)
        {
            // flops for this column of B = sum of the lengths of the matched columns of A
            colflopC[i] += colindsVec[myThread][j].second - colindsVec[myThread][j].first;
        }
    }
    if(deleteAux)
        delete [] aux;
    return colflopC;
}

////////////////////////////////////////////////////////////////////////////////
//////////////////////////// CSC-based local SpGEMM ////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// CSC overload of the hybrid heap/hash SpGEMM (same algorithm as the DCSC
// version above, but indexing directly through Csc jc/ir/num arrays).
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples <IT, NTO> *
LocalHybridSpGEMM (const SpCCols<IT, NT1> &A, const SpCCols<IT, NT2> &B, bool clearA, bool clearB )
{
    double t0 = MPI_Wtime();

    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
        return new SpTuples<IT, NTO>(0, mdim, ndim);

    Csc<IT, NT1> *Acsc = A.GetCSC();
    Csc<IT, NT2> *Bcsc = B.GetCSC();

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT *flopC = estimateFLOP(A, B);
    IT *flopptr = prefixsum<IT>(flopC, Bcsc->n, numThreads);
    IT flop = flopptr[Bcsc->n];

    IT *colnnzC = estimateNNZ_Hash(A, B, flopC);
    IT *colptrC = prefixsum<IT>(colnnzC, Bcsc->n, numThreads);
    delete [] colnnzC;
    delete [] flopC;
    IT nnzc = colptrC[Bcsc->n];
    double compression_ratio = (double)flop / nnzc;

    std::tuple<IT, IT, NTO> *tuplesC = static_cast<std::tuple<IT, IT, NTO> *> (::operator new (sizeof(std::tuple<IT, IT, NTO>[nnzc])));

#ifdef THREADED
#pragma omp parallel for
#endif
    for (size_t i = 0; i < Bcsc->n; ++i)
    {
        size_t nnzcolB = Bcsc->jc[i + 1] - Bcsc->jc[i];
        double cr = static_cast<double> (flopptr[i+1] - flopptr[i]) / (colptrC[i+1] - colptrC[i]);

        if (cr < 2.0) // Heap Algorithm
        {
            // cnt tracks the current cursor into each matched column of A
            std::vector<IT> cnt(nnzcolB);
            std::vector<HeapEntry<IT, NT1>> globalheapVec(nnzcolB);
            HeapEntry<IT, NT1> *wset = globalheapVec.data();
            IT hsize = 0;

            for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i + 1]; ++j)
            {
                IT ca = Bcsc->ir[j];
                cnt[j - Bcsc->jc[i]] = Acsc->jc[ca];
                if (Acsc->jc[ca] != Acsc->jc[ca + 1]) // col not empty
                    wset[hsize++] = HeapEntry<IT, NT1> (Acsc->ir[Acsc->jc[ca]], j, Acsc->num[Acsc->jc[ca]]);
            }
            std::make_heap(wset, wset + hsize);

            IT curptr = colptrC[i];
            while (hsize > 0)
            {
                std::pop_heap(wset, wset + hsize);
                IT locb = wset[hsize - 1].runr;
                NTO mrhs = SR::multiply(wset[hsize - 1].num, Bcsc->num[locb]);
                if (!SR::returnedSAID())
                {
                    if ((curptr > colptrC[i]) && std::get<0>(tuplesC[curptr - 1]) == wset[hsize - 1].key)
                        std::get<2>(tuplesC[curptr - 1]) = SR::add(std::get<2>(tuplesC[curptr - 1]), mrhs);
                    else
                        tuplesC[curptr++] = std::make_tuple(wset[hsize - 1].key, i, mrhs) ;
                }

                IT locb_offset = locb - Bcsc->jc[i];
                IT ca = Bcsc->ir[locb];
                ++(cnt[locb_offset]);
                if (cnt[locb_offset] != Acsc->jc[ca + 1])
                {
                    wset[hsize - 1].key = Acsc->ir[cnt[locb_offset]];
                    wset[hsize - 1].num = Acsc->num[cnt[locb_offset]];
                    std::push_heap(wset, wset + hsize);
                }
                else
                    --hsize;
            }
        } // Finish heap
        else // Hash Algorithm
        {
            // Set up hash table; here max() is the empty-slot sentinel
            const IT minHashTableSize = 16;
            const IT hashScale = 107;
            size_t nnzcolC = colptrC[i+1] - colptrC[i];
            size_t ht_size = minHashTableSize;
            while (ht_size < nnzcolC)
                ht_size <<= 1;
            std::vector< std::pair<IT, NTO>> T(ht_size);
            for (size_t j = 0; j < ht_size; ++j)
                T[j].first = std::numeric_limits<IT>::max();

            // multiplication
            for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i + 1]; ++j)
            {
                IT t_bcol = Bcsc->ir[j];
                NT2 t_bval = Bcsc->num[j];
                for (IT k = Acsc->jc[t_bcol]; k < Acsc->jc[t_bcol + 1]; ++k)
                {
                    NTO mrhs = SR::multiply(Acsc->num[k], t_bval);
                    IT key = Acsc->ir[k];
                    IT hv = (key * hashScale) & (ht_size - 1);

                repeat:  // linear probing loop
                    if (T[hv].first == key)
                        T[hv].second = SR::add(mrhs, T[hv].second);
                    else if (T[hv].first == std::numeric_limits<IT>::max())
                    {
                        T[hv].first = key;
                        T[hv].second = mrhs;
                    }
                    else
                    {
                        hv = (hv + 1) & (ht_size - 1);
                        goto repeat;
                    }
                }
            }

            // compact occupied slots, sort by row index, and emit
            size_t index = 0;
            for (size_t j = 0; j < ht_size; ++j)
            {
                if (T[j].first != std::numeric_limits<IT>::max())
                    T[index++] = T[j];
            }
            std::sort(T.begin(), T.begin() + index, sort_less<IT, NTO>);

            IT curptr = colptrC[i];
            for (size_t j = 0; j < index; ++j)
                tuplesC[curptr++] = std::make_tuple(T[j].first, i, T[j].second);
        }
    }

    if (clearA)
        delete const_cast<SpCCols<IT, NT1> *>(&A);
    if (clearB)
        delete const_cast<SpCCols<IT, NT2> *>(&B);

    delete [] colptrC;
    delete [] flopptr;

    SpTuples<IT, NTO> *spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, true);

    double t1 = MPI_Wtime();
    return spTuplesC;
}

// CSC overload of estimateFLOP: per output column, sum of the lengths of
// the columns of A selected by B's nonzeros.  Caller deletes the result.
template <typename IT, typename NT1, typename NT2>
IT *
estimateFLOP (const SpCCols<IT, NT1> &A, const SpCCols<IT, NT2> &B )
{
    IT nnzA = A.getnnz();
    if (A.isZero() || B.isZero())
        return NULL;

    Csc<IT, NT1> *Acsc = A.GetCSC();
    Csc<IT, NT2> *Bcsc = B.GetCSC();

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT *colflopC = new IT[Bcsc->n];

#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < Bcsc->n; ++i)
        colflopC[i] = 0;

#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < Bcsc->n; ++i)
    {
        for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i+1]; ++j)
            colflopC[i] += Acsc->jc[Bcsc->ir[j]+1] - Acsc->jc[Bcsc->ir[j]];
    }

    return colflopC;
}

// CSC overload of estimateNNZ_Hash: exact per-column nnz of C via a hash
// table sized by flopC[i].  Caller deletes the result with delete [].
template <typename IT, typename NT1, typename NT2>
IT *
estimateNNZ_Hash (const SpCCols<IT, NT1> &A, const SpCCols<IT, NT2> &B, const IT *flopC )
{
    IT nnzA = A.getnnz();
    if (A.isZero() || B.isZero())
        return NULL;

    Csc<IT, NT1> *Acsc = A.GetCSC();
    Csc<IT, NT2> *Bcsc = B.GetCSC();

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT *colnnzC = new IT[Bcsc->n];

#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < Bcsc->n; ++i)
        colnnzC[i] = 0;

#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < Bcsc->n; ++i)
    {
        // init hash table
        const IT minHashTableSize = 16;
        const IT hashScale = 107;
        IT ht_size = minHashTableSize;
        while (ht_size < flopC[i]) // make size of hash table a power of 2
            ht_size <<= 1;
        // IT can be unsigned
        std::vector<IT> T(ht_size);
        for (IT j = 0; (unsigned)j < ht_size; ++j)
            T[j] = std::numeric_limits<IT>::max();

        for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i + 1]; ++j)
        {
            for (IT k = Acsc->jc[Bcsc->ir[j]]; k < Acsc->jc[Bcsc->ir[j]+1]; ++k)
            {
                IT key = Acsc->ir[k];
                IT hv = (key * hashScale) & (ht_size - 1);
                while (1)
                {
                    if (T[hv] == key)
                        break;
                    else if (T[hv] == std::numeric_limits<IT>::max())
                    {
                        T[hv] = key;
                        ++(colnnzC[i]);
                        break;
                    }
                    else
                        hv = (hv + 1) & (ht_size - 1);
                }
            }
        }
    }

    return colnnzC;
}

}

#endif
gemm.c
#include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h> /* fixed: clock()/CLOCKS_PER_SEC/clock_t are used in
                     time_random_matrix() even when GPU is disabled, but
                     <time.h> was previously included only inside #ifdef GPU */

/* C += (A[i][k] ? +B[k] : -B[k]) for a binary (char 0/1) matrix A.
 * M,N,K: usual GEMM dimensions; lda/ldb/ldc: row strides.
 * ALPHA is accepted for signature symmetry with gemm() but is unused. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            char A_PART = A[i*lda + k];
            if(A_PART){
                for(j = 0; j < N; ++j){
                    C[i*ldc + j] += B[k*ldb + j];
                }
            } else {
                for(j = 0; j < N; ++j){
                    C[i*ldc + j] -= B[k*ldb + j];
                }
            }
        }
    }
}

/* Allocate a rows*cols matrix of uniform random floats in [0,1].
 * Caller owns the returned buffer (free()).  Aborts on allocation failure. */
float *random_matrix(int rows, int cols)
{
    int i;
    /* fixed: result of calloc was used unchecked, and rows*cols was computed
     * in int (overflow risk for large benchmark shapes) */
    float *m = calloc((size_t)rows * (size_t)cols, sizeof(float));
    if(!m){
        fprintf(stderr, "random_matrix: failed to allocate %d x %d floats\n",
                rows, cols);
        exit(EXIT_FAILURE);
    }
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

/* Time 10 CPU GEMMs of the given shape/transpose configuration and print
 * the total elapsed CPU time. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i < 10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    /* fixed: the printed value is in seconds (ticks / CLOCKS_PER_SEC) but
     * the label previously said "ms" */
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",
            m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Public entry point: C = ALPHA * op(A) * op(B) + BETA * C, dispatched to
 * the CPU implementation.  TA/TB select transposition of A/B. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}

/* C += ALPHA * A * B  (no transposition).  ikj loop order keeps the inner
 * loop streaming over contiguous rows of B and C. */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[i*lda + k];
            for(j = 0; j < N; ++j){
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

/* C += ALPHA * A * B^T. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            register float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda + k]*B[j*ldb + k];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/* C += ALPHA * A^T * B. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[k*lda + i];
            for(j = 0; j < N; ++j){
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

/* C += ALPHA * A^T * B^T. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            register float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i + k*lda]*B[k + j*ldb];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/* CPU GEMM: scale C by BETA, then parallelize over output rows, handing
 * each row to the single-row (M=1) variant matching the TA/TB flags.
 * For transposed A the row offset is +t (column step), otherwise +t*lda. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    int i, j;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    int t;
    #pragma omp parallel for
    for (t = 0; t < M; ++t) {
        if (!TA && !TB)
            gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
        else if (TA && !TB)
            gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
        else if (!TA && TB)
            gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
        else
            gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
    }
}

#ifdef GPU

#include <math.h>

/* GPU GEMM on device pointers via cuBLAS.  cuBLAS is column-major, so the
 * call computes C^T = op(B)^T * op(A)^T, which is the desired row-major C. */
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    /* NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t;
     * this assignment matches check_error's historical signature here —
     * confirm against cuda.h before changing the type. */
    cudaError_t status = cublasSgemm(handle,
            (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N),
            N, M, K,
            &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}

/* Host-pointer convenience wrapper: copy A/B/C to the device, run
 * gemm_ongpu, copy C back, free the temporaries. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    float *A_gpu = cuda_make_array(A, (TA ? lda*K : lda*M));
    float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
    float *C_gpu = cuda_make_array(C, ldc*M);

    gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);

    cuda_pull_array(C_gpu, C, ldc*M);
    cuda_free(A_gpu);
    cuda_free(B_gpu);
    cuda_free(C_gpu);
}

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Time 32 host-pointer GPU GEMMs (includes transfer overhead each call). */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i < 32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",
            m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Time device-resident GEMMs (transfers excluded) and report GFLOPS. */
void time_ongpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i < iter; ++i){
        gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaThreadSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",
            m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}

/* Compare GPU and CPU results on random inputs; print mean squared error. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",
            m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

/* Benchmark menu used during development; returns 0. */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,192,729,1600);
       time_ongpu(0,0,384,196,1728);
       time_ongpu(0,0,256,196,3456);
       time_ongpu(0,0,256,196,2304);
       time_ongpu(0,0,128,4096,12544);
       time_ongpu(0,0,128,4096,4096);
     */
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,576,12544);
    time_ongpu(0,0,256,2304,784);
    time_ongpu(1,1,2304,256,784);
    time_ongpu(0,0,512,4608,196);
    time_ongpu(1,1,4608,512,196);

    return 0;
}
#endif
binary_move_generator.h
/*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_BINARY_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_BINARY_MOVE_GENERATOR_H__

#include "abstract_move_generator.h"

namespace printemps {
namespace neighborhood {
/*****************************************************************************/
/**
 * Neighborhood-move generator for binary (0/1) decision variables.
 * Produces one "flip" move per mutable binary variable; the target value of
 * each move is refreshed lazily by the updater installed in setup().
 */
template <class T_Variable, class T_Expression>
class BinaryMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
   public:
    /*************************************************************************/
    BinaryMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    virtual ~BinaryMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    /**
     * Builds one move per mutable variable in a_RAW_VARIABLE_PTRS and
     * installs m_move_updater, which rewrites each move's target value and
     * availability flag before every neighborhood evaluation.
     */
    void setup(const std::vector<model_component::Variable<
                   T_Variable, T_Expression> *> &a_RAW_VARIABLE_PTRS) {
        /**
         * "Flip" move for binary variables:
         * e.g) binary variable x \in {0, 1}
         *  move: {(x = 1)} (if x = 0)
         *        {(x = 0)} (if x = 1)
         */

        /**
         * Extract mutable variables.
         */
        auto mutable_variable_ptrs =
            extract_mutable_variable_ptrs(a_RAW_VARIABLE_PTRS);

        /**
         * Setup move objects.
         */
        // One pre-allocated move (and flag slot) per mutable variable; the
        // alteration target value is a placeholder (0) until the updater runs.
        const int VARIABLES_SIZE = mutable_variable_ptrs.size();

        this->m_moves.resize(VARIABLES_SIZE);
        this->m_flags.resize(VARIABLES_SIZE);

        for (auto i = 0; i < VARIABLES_SIZE; i++) {
            this->m_moves[i].sense = MoveSense::Binary;
            this->m_moves[i].related_constraint_ptrs =
                mutable_variable_ptrs[i]->related_constraint_ptrs();
            this->m_moves[i].alterations.emplace_back(mutable_variable_ptrs[i],
                                                      0);
            this->m_moves[i].is_univariable_move         = true;
            this->m_moves[i].is_special_neighborhood_move = false;
            this->m_moves[i].is_available                 = true;
            this->m_moves[i].overlap_rate                 = 0.0;
        }

        /**
         * Setup move updater.
         */
        // Captures the variable pointers by value so the lambda stays valid
        // after setup() returns.  For each variable that passes the
        // acceptance filters, the flip target becomes 1 - current value and
        // the corresponding flag is raised; otherwise the flag is cleared.
        auto move_updater =  //
            [this, mutable_variable_ptrs, VARIABLES_SIZE](
                auto *     a_moves,                          //
                auto *     a_flags,                          //
                const bool a_ACCEPT_ALL,                     //
                const bool a_ACCEPT_OBJECTIVE_IMPROVABLE,    //
                const bool a_ACCEPT_FEASIBILITY_IMPROVABLE,  //
                [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < VARIABLES_SIZE; i++) {
                    if (a_ACCEPT_ALL ||
                        (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                         mutable_variable_ptrs[i]->is_objective_improvable()) ||
                        (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                         mutable_variable_ptrs[i]
                             ->is_feasibility_improvable())) {
                        (*a_moves)[i].alterations.front().second =
                            1 - mutable_variable_ptrs[i]->value();
                        (*a_flags)[i] = 1;
                    } else {
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
}  // namespace neighborhood
}  // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
template_cpu_01.h
/* Copyright 2015 The math21 Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#pragma once

#include "inner.h"

// CPU instantiation of the shared kernel templates: the same .kl kernel
// bodies are compiled here as plain functions driven by OpenMP loops
// (MATH21_IS_FROM_CPU selects the CPU code path inside the .kl files).
#define MATH21_IS_FROM_CPU

#include "../kernels/generic_01.kl"
#include "../kernels/generic_01_vector_set.kl"
#include "../kernels/generic_01_tensor_3d_f_set.kl"

#undef MATH21_IS_FROM_CPU

namespace math21 {

    // a special kind of sub, region sub.
    // x is sub-tensor of y
    // Copies (isGet==0) or extracts (isGet!=0) a rectangular region between
    // x and y; dx/dy are the dimension arrays, offset the region origin.
    // Pointers are shifted by -1 because the kernels index from 1.
    template<typename T>
    void math21_template_tensor_sub_set_or_get_cpu(NumN n, T *x, T *y,
                                                   NumN dims, const NumN *dx, const NumN *dy,
                                                   const NumN *offset, NumB isGet) {
        math21_tool_assert(dims <= MATH21_KERNEL_ARRAY_MAX_LENGTH);
        x -= 1;
        y -= 1;
        dx -= 1;
        dy -= 1;
        offset -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_tensor_sub_set_or_get_cpu_kernel(n, x, y, dims, dx, dy, offset, isGet, id);
        }
    }

    // x = k*x
    // NOTE(review): unlike most routines in this file, x is not shifted by
    // -1 here — presumably this kernel handles its own offsetting; confirm
    // against the .kl source.
    template<typename T>
    void math21_template_vector_kx_cpu(NumN n, T k, T *x, NumN stride_x) {
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_kx_cpu_kernel(n, k, x, stride_x, id);
        }
    }

    // y = k*x + y (strided axpy)
    template<typename T>
    void math21_template_vector_kx_add_y_cpu(NumN n, T k, const T *x, NumN stride_x, T *y, NumN stride_y) {
        x -= 1;
        y -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_kx_add_y_cpu_kernel(n, k, x, stride_x, y, stride_y, id);
        }
    }

    // see math21_vector_assign_from_vector_byte_cpu
    // y[offset_y + i*stride_y] = x[offset_x + i*stride_x], with element-type
    // conversion T1 -> T2.
    template<typename T1, typename T2>
    void math21_template_vector_set_by_vector_cpu(NumN n, const T1 *x, NumN stride_x, T2 *y, NumN stride_y,
                                                  NumN offset_x, NumN offset_y) {
        x += offset_x;
        y += offset_y;
        x -= 1;
        y -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_set_by_vector_cpu_kernel(n, x, stride_x, y, stride_y, id);
        }
    }

    // Strided matrix copy y <- x over a d1 x d2 region.
    template<typename T>
    void math21_template_matrix_set_by_matrix_cpu(NumN d1, NumN d2,
                                                  const T *x, NumN d1_x, NumN d2_x, NumN stride1_x, NumN stride2_x,
                                                  T *y, NumN d1_y, NumN d2_y, NumN stride1_y, NumN stride2_y,
                                                  NumN offset_x, NumN offset_y) {
        d2_x = stride1_x * d2_x; // stride absorbed into next dim, so stride will become 1.
        d2_y = stride1_y * d2_y;
        x += offset_x;
        y += offset_y;
        x -= 1;
        y -= 1;
        NumN n = d1 * d2;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_matrix_set_by_matrix_cpu_kernel(
                    n, d2, x, d2_x, stride2_x, y, d2_y, stride2_y, id);
        }
    }

    // Strided 3-D tensor copy y <- x over a d1 x d2 x d3 region; the first
    // two strides are absorbed into the following dimension sizes.
    template<typename T>
    void math21_template_tensor_3d_set_by_tensor_3d_cpu(NumN d1, NumN d2, NumN d3,
                                                        const T *x, NumN d1_x, NumN d2_x, NumN d3_x,
                                                        NumN stride1_x, NumN stride2_x, NumN stride3_x,
                                                        T *y, NumN d1_y, NumN d2_y, NumN d3_y,
                                                        NumN stride1_y, NumN stride2_y, NumN stride3_y,
                                                        NumN offset_x, NumN offset_y) {
        d2_x = stride1_x * d2_x;
        d2_y = stride1_y * d2_y;
        d3_x = stride2_x * d3_x;
        d3_y = stride2_y * d3_y;
        x += offset_x;
        y += offset_y;
        x -= 1;
        y -= 1;
        NumN n = d1 * d2 * d3;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_tensor_3d_set_by_tensor_3d_cpu_kernel(
                    n, d2, d3, x, d2_x, d3_x, stride3_x, y, d2_y, d3_y, stride3_y, id);
        }
    }

    // Like the plain 3-D copy, but applies an elementwise combiner selected
    // by fname: y op= x with op in {addto, multo}.  Asserts on any other
    // function name.
    template<typename T>
    void math21_template_tensor_3d_f_set_by_tensor_3d_cpu(NumN fname, NumN d1, NumN d2, NumN d3,
                                                          const T *x, NumN d1_x, NumN d2_x, NumN d3_x,
                                                          NumN stride1_x, NumN stride2_x, NumN stride3_x,
                                                          T *y, NumN d1_y, NumN d2_y, NumN d3_y,
                                                          NumN stride1_y, NumN stride2_y, NumN stride3_y,
                                                          NumN offset_x, NumN offset_y) {
        d2_x = stride1_x * d2_x;
        d2_y = stride1_y * d2_y;
        d3_x = stride2_x * d3_x;
        d3_y = stride2_y * d3_y;
        x += offset_x;
        y += offset_y;
        x -= 1;
        y -= 1;
        NumN n = d1 * d2 * d3;
        NumN id;
        math21_type_f_addto_like f = NULL;
        if (fname == m21_fname_addto) {
            f = math21_device_f_addto;
        } else if (fname == m21_fname_multo) {
            f = math21_device_f_multo;
        } else {
            MATH21_ASSERT(0, "not support calling function with name " << fname);
        }
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_tensor_3d_f_set_by_tensor_3d_cpu_kernel(
                    f, n, d2, d3, x, d2_x, d3_x, stride3_x, y, d2_y, d3_y, stride3_y, id);
        }
    }

    // x[i*stride_x] = value for i in [0, n)
    template<typename T>
    void math21_template_vector_set_by_value_cpu(NumN n, T value, T *x, NumN stride_x) {
        x -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_set_by_value_cpu_kernel(n, value, x, stride_x, id);
        }
    }

    // Strided elementwise copy y <- x.
    template<typename T>
    void math21_template_vector_xy_cpu(NumN n, const T *x, NumN stride_x, T *y, NumN stride_y) {
        x -= 1;
        y -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_xy_cpu_kernel(n, x, stride_x, y, stride_y, id);
        }
    }

    // y = sin(x), elementwise (unshifted pointers — kernel convention).
    template<typename T>
    void math21_template_vector_sin_cpu(NumN n, const T *x, T *y) {
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_sin_cpu_kernel(n, x, y, id);
        }
    }

    // y = cos(x), elementwise (unshifted pointers — kernel convention).
    template<typename T>
    void math21_template_vector_cos_cpu(NumN n, const T *x, T *y) {
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_cos_cpu_kernel(n, x, y, id);
        }
    }

    // Swap rows i and j along the second dimension of a d1 x d2 x d3 tensor.
    template<typename T>
    void math21_template_tensor_3d_swap_row_in_d2_cpu(NumN n, T *x, NumN i, NumN j, NumN d1, NumN d2, NumN d3) {
        x -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_tensor_3d_swap_row_in_d2_cpu_kernel(n, x, i, j, d1, d2, d3, id);
        }
    }

    // C += A + B? — the exact combiner lives in the kernel; per the name,
    // C accumulates A and B elementwise.
    template<typename T>
    void math21_template_vector_addToC_cpu(NumN n, const T *A, const T *B, T *C) {
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_addToC_cpu_kernel(n, A, B, C, id);
        }
    }

    // Elementwise multiply counterpart of addToC.
    template<typename T>
    void math21_template_vector_mulToC_cpu(NumN n, const T *A, const T *B, T *C) {
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_mulToC_cpu_kernel(n, A, B, C, id);
        }
    }

    // todo: use index 1 for x, y
    // a special kind of sub
    // x is sub-tensor of y
    // Broadcasts x (shape dx, dims_x dims) into y (shape dy, dims_y dims).
    template<typename T>
    void math21_template_vector_broadcast_in_dn_cpu(NumN n, const T *x, T *y,
                                                    NumN dims_x, const NumN *dx,
                                                    NumN dims_y, const NumN *dy) {
        x -= 1;
        y -= 1;
        dx -= 1;
        dy -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_vector_broadcast_in_dn_cpu_kernel(n, x, y, dims_x, dx, dims_y, dy, id);
        }
    }

    // Second half of an Adam optimizer step: updates parameters x in place
    // from the first (m) and second (v) moment estimates at time step t.
    template<typename T>
    void math21_template_optimization_adam_update_part_2_cpu(NumN n, T *x, const T *m, const T *v,
                                                             T beta1, T beta2, T alpha, T eps, NumN t) {
        x -= 1;
        m -= 1;
        v -= 1;
        NumN id;
#pragma omp parallel for
        for (id = 1; id <= n; ++id) {
            math21_template_optimization_adam_update_part_2_cpu_kernel(
                    n, x, m, v, beta1, beta2, alpha, eps, t, id);
        }
    }
}
convolution_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack convolution weights from kw-kh-inch-outch layout into the
// interleaved 4x4 layout consumed by convolution_pack4_neon below.
// Requires num_input and num_output to be multiples of 4.
static void convolution_transform_kernel_pack4_neon(const Mat& weight_data, Mat& weight_data_pack4, int num_input, int num_output, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // src = kw-kh-inch-outch
    // dst = 4b-4a-kw-kh-inch/4a-outch/4b
    Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);

    weight_data_pack4.create(maxk, num_input / 4, num_output / 4, (size_t)4 * 16, 16);

    for (int q = 0; q + 3 < num_output; q += 4)
    {
        // four consecutive output channels are interleaved together
        const Mat k0 = weight_data_r2.channel(q);
        const Mat k1 = weight_data_r2.channel(q + 1);
        const Mat k2 = weight_data_r2.channel(q + 2);
        const Mat k3 = weight_data_r2.channel(q + 3);

        Mat g0 = weight_data_pack4.channel(q / 4);

        for (int p = 0; p + 3 < num_input; p += 4)
        {
            // kXY = weights of output channel X, input channel Y
            const float* k00 = k0.row(p);
            const float* k01 = k0.row(p + 1);
            const float* k02 = k0.row(p + 2);
            const float* k03 = k0.row(p + 3);

            const float* k10 = k1.row(p);
            const float* k11 = k1.row(p + 1);
            const float* k12 = k1.row(p + 2);
            const float* k13 = k1.row(p + 3);

            const float* k20 = k2.row(p);
            const float* k21 = k2.row(p + 1);
            const float* k22 = k2.row(p + 2);
            const float* k23 = k2.row(p + 3);

            const float* k30 = k3.row(p);
            const float* k31 = k3.row(p + 1);
            const float* k32 = k3.row(p + 2);
            const float* k33 = k3.row(p + 3);

            float* g00 = g0.row(p / 4);

            // per spatial tap: 16 floats, grouped by input channel so that
            // each vld1q_f32 in the compute kernel reads one 4-wide weight row
            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00[4] = k01[k];
                g00[5] = k11[k];
                g00[6] = k21[k];
                g00[7] = k31[k];

                g00[8] = k02[k];
                g00[9] = k12[k];
                g00[10] = k22[k];
                g00[11] = k32[k];

                g00[12] = k03[k];
                g00[13] = k13[k];
                g00[14] = k23[k];
                g00[15] = k33[k];

                g00 += 16;
            }
        }
    }
}

// 4-element-packed NEON convolution: bottom_blob (pack4) -> top_blob (pack4)
// using weights prepared by convolution_transform_kernel_pack4_neon.
// Applies optional per-channel bias and the requested activation.
static void convolution_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] = flat offset (in packed elements) of tap k relative to
    // the window origin, accounting for dilation and row wrap
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // accumulator starts at the bias (or zero)
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const float* kptr = (const float*)weight_data_pack4 + maxk * channels * p * 16;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        float32x4_t _val = vld1q_f32(sptr + space_ofs[k] * 4);

                        // 4x4 weight tile: each lane of _val (one input
                        // channel) is multiplied into all 4 output channels
                        float32x4_t _w0 = vld1q_f32(kptr);
                        float32x4_t _w1 = vld1q_f32(kptr + 4);
                        float32x4_t _w2 = vld1q_f32(kptr + 8);
                        float32x4_t _w3 = vld1q_f32(kptr + 12);

#if __aarch64__
                        _sum = vmlaq_laneq_f32(_sum, _w0, _val, 0);
                        _sum = vmlaq_laneq_f32(_sum, _w1, _val, 1);
                        _sum = vmlaq_laneq_f32(_sum, _w2, _val, 2);
                        _sum = vmlaq_laneq_f32(_sum, _w3, _val, 3);
#else
                        // armv7 has no laneq variant; split into low/high halves
                        _sum = vmlaq_lane_f32(_sum, _w0, vget_low_f32(_val), 0);
                        _sum = vmlaq_lane_f32(_sum, _w1, vget_low_f32(_val), 1);
                        _sum = vmlaq_lane_f32(_sum, _w2, vget_high_f32(_val), 0);
                        _sum = vmlaq_lane_f32(_sum, _w3, vget_high_f32(_val), 1);
#endif

                        kptr += 16;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1q_f32(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
two_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: January 2016 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H #define KRATOS_TWO_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" /* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */ #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy); /// Counted pointer of TwoStepVPStrategy //typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; 
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ TwoStepVPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { InitializeStrategy(rSolverConfig); } TwoStepVPStrategy(ModelPart &rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input? mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. 
// Additional Typedefs //typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent; typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); 
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. virtual ~TwoStepVPStrategy() {} int Check() override { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if (DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", ""); if (BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", ""); ModelPart &rModelPart = BaseType::GetModelPart(); if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize()); if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize()); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } /* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */ /* { */ /* ierr = itCond->Check(rCurrentProcessInfo); */ /* if (ierr != 0) break; */ /* } */ return ierr; KRATOS_CATCH(""); } double Solve() override { // Initialize BDF2 coefficients ModelPart &rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); double NormDp = 0.0; ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; bool timeIntervalChanged = 
rCurrentProcessInfo[TIME_INTERVAL_CHANGED]; unsigned int stepsWithChangedDt = rCurrentProcessInfo[STEPS_WITH_CHANGED_DT]; unsigned int maxNonLinearIterations = mMaxPressureIter; KRATOS_INFO("TwoStepVPStrategy") << "\n Solve with two_step_vp strategy at t=" << currentTime << "s" << std::endl; if ((timeIntervalChanged == true && currentTime > 10 * timeInterval) || stepsWithChangedDt > 0) { maxNonLinearIterations *= 2; } if (currentTime < 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl; maxNonLinearIterations *= 3; } if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl; maxNonLinearIterations *= 2; } bool momentumConverged = true; bool continuityConverged = false; bool fixedTimeStep = false; bool momentumAlreadyConverged = false; bool continuityAlreadyConverged = false; /* boost::timer solve_step_time; */ // Iterative solution for pressure /* unsigned int timeStep = rCurrentProcessInfo[STEP]; */ /* if(timeStep==1){ */ /* unsigned int iter=0; */ /* continuityConverged = this->SolveContinuityIteration(iter,maxNonLinearIterations); */ /* }else if(timeStep==2){ */ /* unsigned int iter=0; */ /* momentumConverged = this->SolveMomentumIteration(iter,maxNonLinearIterations,fixedTimeStep); */ /* }else{ */ this->UnactiveSliverElements(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "----- > iteration: " << it << std::endl; momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); if ((momentumConverged == true || it == maxNonLinearIterations - 1) && momentumAlreadyConverged == false) { // std::ofstream myfile; // 
myfile.open ("momentumConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); momentumAlreadyConverged = true; } if ((continuityConverged == true || it == maxNonLinearIterations - 1) && continuityAlreadyConverged == false) { // std::ofstream myfile; // myfile.open ("continuityConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); continuityAlreadyConverged = true; } if (fixedTimeStep == false) { continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations); } if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2)) { //this->ComputeErrorL2Norm(); //this->ComputeErrorL2NormCasePoiseuille(); this->UpdateStressStrain(); // std::ofstream myfile; // myfile.open ("maxConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); } if ((continuityConverged && momentumConverged) && it > 2) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } if (fixedTimeStep == true) { break; } } /* } */ if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." 
<< std::endl;
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */

// Optionally rebuild the DOF sets for the next step, then report the last pressure norm.
if (mReformDofSet)
    this->Clear();

return NormDp;
}

// End-of-step hook: the stress/strain update is driven from inside the iteration loop,
// so nothing remains to do here.
void FinalizeSolutionStep() override
{
    /* this->UpdateStressStrain(); */
}

// Start-of-step hook: this strategy needs no per-step preparation.
void InitializeSolutionStep() override
{
}

// Recomputes nodal displacements from the velocity history and moves the mesh to the
// updated positions (Lagrangian update).
// NOTE(review): rModelPart and echoLevel are currently unused — the boundary-normal
// recomputation that consumed them is commented out below.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
    KRATOS_TRY;

    this->CalculateDisplacementsAndPorosity();
    BaseType::MoveMesh();
    /* BoundaryNormalsCalculationUtilities BoundaryComputation; */
    /* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */

    KRATOS_CATCH("");
}

// Deactivates "sliver" simplex elements (triangles in 2D, tetrahedra in 3D) whose
// measure falls below 0.1% of the mean elemental volume of the model part; all other
// simplex elements are (re)activated. Non-simplex elements are left untouched.
void UnactiveSliverElements()
{
    KRATOS_TRY;

    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    MesherUtilities MesherUtils;
    double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
    // Threshold: one thousandth of the mean element volume.
    double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
    double ElementalVolume = 0;

#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            unsigned int numNodes = itElem->GetGeometry().size();
            // Only simplex elements: node count == spatial dimension + 1.
            if (numNodes == (dimension + 1))
            {
                if (dimension == 2)
                {
                    ElementalVolume = (itElem)->GetGeometry().Area();
                }
                else if (dimension == 3)
                {
                    ElementalVolume = (itElem)->GetGeometry().Volume();
                }

                if (ElementalVolume < CriticalVolume)
                {
                    // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
                    (itElem)->Set(ACTIVE, false);
                }
                else
                {
                    (itElem)->Set(ACTIVE, true);
                }
            }
        }
    }

    KRATOS_CATCH("");
}

// Nodal pressure time derivative: PRESSURE_VELOCITY = (p^n - p^{n-1}) / dt,
// zeroed on the first time step where no pressure history exists yet.
void CalculatePressureVelocity()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    unsigned int timeStep =
rCurrentProcessInfo[STEP];

for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
    if (timeStep == 1)
    {
        // First step: no history, so both stored values of the rate are zeroed.
        (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
        (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
    }
    else
    {
        double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
        double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
        double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
        // Backward finite difference of the nodal pressure.
        CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
    }
}
}

// Nodal pressure second time derivative: PRESSURE_ACCELERATION as a backward finite
// difference of PRESSURE_VELOCITY over dt (zeroed on the first step).
void CalculatePressureAcceleration()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    unsigned int timeStep = rCurrentProcessInfo[STEP];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        if (timeStep == 1)
        {
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
        }
        else
        {
            double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
            double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
            double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
            CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
        }
    }
}

// Single nodal sweep that (a) updates accelerations via the BDF scheme for free fluid
// and solid nodes, (b) zeroes kinematics on rigid nodes, (c) resets isolated nodes to a
// body-force-driven state, and (d) refreshes the pressure time derivatives.
virtual void CalculateTemporalVariables()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentAcceleration =
(i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);

/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Free (non-isolated) nodes that are either non-rigid or solid: BDF update.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
    UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
    // Rigid nodes: motion is prescribed, force both acceleration entries to zero.
    array_1d<double, 3> Zeros(3, 0.0);
    (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
    (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
    // Isolated nodes: wipe the pressure history and restart the kinematics from the
    // body force (free flight), if a volume acceleration is stored on the node.
    (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
    (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
    (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
    (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
    (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
    (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
    if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
    {
        array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
        (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
        (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
    }
}

const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];

if (timeStep == 1)
{
    // First step: no history for the pressure rates.
    (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
    (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
    (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
    (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
    double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
    double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
    double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
    double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);

    // Stash the old pressure rate (divided by dt) before overwriting it below;
    // the rate itself is then updated and subtracted again to form the acceleration.
    CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
// Net effect: PRESSURE_ACCELERATION = (pv_old - pv_new) / dt.
// NOTE(review): this is the opposite sign of CalculatePressureAcceleration's
// (pv_new - pv_old) / dt — confirm the sign convention is intentional.
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}

// Nodal acceleration update only (no pressure-rate handling): BDF update for free
// fluid/solid nodes, zeroed accelerations on rigid nodes, and body-force reset for
// isolated nodes. Superseded in the step update by CalculateTemporalVariables().
void CalculateAccelerations()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
        array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);

        /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
        if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
        {
            UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
        }
        else if ((i)->Is(RIGID))
        {
            array_1d<double, 3> Zeros(3, 0.0);
            (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
            (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
        }
        else
        {
            // Isolated node: clear pressure history, restart from the body force.
            (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
            if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
            {
                array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
                (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
            }
        }
    }
}

// BDF acceleration update from the velocity increment; see the coefficient
// definitions in SetTimeCoefficients().
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3>
&CurrentVelocity,
array_1d<double, 3> &PreviousAcceleration,
const array_1d<double, 3> &PreviousVelocity,
Vector &BDFcoeffs)
{
    /* noalias(PreviousAcceleration)=CurrentAcceleration; */
    // a^{n+1} = -c1 * (v^{n+1} - v^n) - a^n  with c1 = BDFcoeffs[1].
    noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
    // std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t)
    // std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t)
    // std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t)
}

// Trapezoidal (Crank-Nicolson style) nodal displacement update:
// d^{n+1} = d^n + dt/2 * (v^{n+1} + v^n), applied componentwise to all nodes
// (fixity checks are currently commented out, so fixed DOFs are updated too).
// NOTE(review): despite the name, no porosity quantity is updated here.
virtual void CalculateDisplacementsAndPorosity()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);

        /* if( i->IsFixed(DISPLACEMENT_X) == false ) */
        CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];

        /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
        CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];

        /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
        CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];

        // currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
    }
}

// Re-initializes the elemental stress/strain state for the new configuration and then
// refreshes all nodal temporal variables (accelerations and pressure rates).
void UpdateStressStrain()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();

#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            /* itElem-> InitializeElementStrainStressState(); */
            itElem->InitializeSolutionStep(rCurrentProcessInfo);
        }
    }

    // The three separate updates below are superseded by the single combined sweep.
    /* this->CalculateAccelerations(); */
    /* this->CalculatePressureVelocity(); */
    /* this->CalculatePressureAcceleration(); */
    this->CalculateTemporalVariables();
}

// Releases the internal data of both linear sub-strategies (momentum and pressure).
void Clear() override
{
    mpMomentumStrategy->Clear();
    mpPressureStrategy->Clear();
}

///@}
///@name Access
///@{

// Sets this strategy's verbosity and propagates a one-lower level to the two
// sub-strategies (never below 0).
void SetEchoLevel(int Level) override
{
    BaseType::SetEchoLevel(Level);
    int StrategyLevel = Level > 0 ? Level - 1 : 0;
    mpMomentumStrategy->SetEchoLevel(StrategyLevel);
    mpPressureStrategy->SetEchoLevel(StrategyLevel);
}

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
std::string Info() const override
{
    std::stringstream buffer;
    buffer << "TwoStepVPStrategy";
    return buffer.str();
}

/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "TwoStepVPStrategy";
}

/// Print object's data.
void PrintData(std::ostream &rOStream) const override
{
}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected Life Cycle
///@{

///@}
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

/// Calculate the coefficients for time iteration.
/**
 * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
 */
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;

    if (mTimeOrder == 2)
    {
        //calculate the BDF coefficients
        // Variable-step BDF2: Rho is the ratio of the previous to the current dt.
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff;                                  //coefficient for step n-1 (1/2Dt if Dt is constant)
    }
    else if (mTimeOrder == 1)
    {
        // Backward Euler (BDF1).
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double TimeCoeff = 1.0 / Dt;

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);

        BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
    }

    KRATOS_CATCH("");
}

// One nonlinear iteration of the momentum (velocity) sub-problem. Returns true when
// the relative velocity increment is below mVelocityTolerance; may set fixedTimeStep
// on the last iteration (or after iteration 2) if convergence looks pathological.
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedMomentum = false;
    double NormDv = 0;
    fixedTimeStep = false;
    // build momentum system and solve for fractional step velocity increment
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);

    /* std::cout<<"---- m o m e n t u m e q u a t i o n s ----"<<std::endl; */
    if (it == 0)
    {
        mpMomentumStrategy->InitializeSolutionStep();
    }
    /* else{ */
    /* NormDv = mpMomentumStrategy->Solve(); */
    /* } */
    NormDv = mpMomentumStrategy->Solve();

    if (BaseType::GetEchoLevel() > 1 && Rank == 0)
        std::cout << "-------------- s o l v e d ! ------------------" << std::endl;

    double DvErrorNorm = 0;
    ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
    // Pathological-convergence checks only start after this iteration count.
    unsigned int iterationForCheck = 2;
    KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;

    // Check convergence
    if (it == maxIt - 1)
    {
        KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Final Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
        fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
    }
    else if (it > iterationForCheck)
    {
        fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
    }
    // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // double currentTime = rCurrentProcessInfo[TIME];
    // double tolerance=0.0000000001;
    // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
    // myfile << it << "\t" << DvErrorNorm << "\n";
    // myfile.close();
    // }
    // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
    // myfile << it << "\t" << DvErrorNorm << "\n";
    // myfile.close();
    // }
    // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
    // myfile << it << "\t" << DvErrorNorm << "\n";
    // myfile.close();
    // }
    // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
    // myfile << it << "\t" << DvErrorNorm << "\n";
    // myfile.close();
    // }

    if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Momentum equations did not reach the convergence tolerance."
<< std::endl;

    return ConvergedMomentum;
}

// One iteration of the continuity (pressure) sub-problem. Returns true when the
// relative pressure increment is below mPressureTolerance; on the last iteration the
// result may be overridden by FixTimeStepContinuity().
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedContinuity = false;
    double NormDp = 0;

    // 2. Pressure solution
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);

    /* std::cout<<" ---- c o n t i n u i t y e q u a t i o n ----"<<std::endl; */

    if (it == 0)
    {
        mpPressureStrategy->InitializeSolutionStep();
    }
    /* else{ */
    /* NormDp = mpPressureStrategy->Solve(); */
    /* } */
    NormDp = mpPressureStrategy->Solve();

    if (BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "The norm of pressure is: " << NormDp << std::endl;

    double DpErrorNorm = 0;
    ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);
    KRATOS_INFO("TwoStepVPStrategy") << " iteration(" << it << ") Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;

    // Check convergence
    if (it == maxIt - 1)
    {
        KRATOS_INFO("TwoStepVPStrategy") << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
        ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
    }
    // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // double currentTime = rCurrentProcessInfo[TIME];
    // double tolerance=0.0000000001;
    // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
    // myfile << it << "\t" << DpErrorNorm << "\n";
    // myfile.close();
    // }
    // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
    // myfile << it << "\t" << DpErrorNorm << "\n";
    // myfile.close();
    // }
    // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
    // myfile << it << "\t" << DpErrorNorm << "\n";
    // myfile.close();
    // }
    // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
    // std::ofstream myfile;
    // myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
    // myfile << it << "\t" << DpErrorNorm << "\n";
    // myfile.close();
    // }

    if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;

    return ConvergedContinuity;
}

// Diagnostic only: computes element-based L2 error norms of velocity, pressure and
// deviatoric stress against a hard-coded manufactured solution, then appends the norms
// to a set of text files in the working directory.
// NOTE(review): the elemental tau values are currently hard-coded to 0 (the GetValue
// calls are commented out), so the tau error norms measure only the expected fields.
void ComputeErrorL2Norm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    long double sumErrorL2Velocity = 0;
    long double sumErrorL2VelocityX = 0;
    long double sumErrorL2VelocityY = 0;
    long double sumErrorL2Pressure = 0;
    long double sumErrorL2TauXX = 0;
    long double sumErrorL2TauYY = 0;
    long double sumErrorL2TauXY = 0;

#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            long double nodalArea = 0;

            if (dimension == 2)
            {
                nodalArea = geometry.Area() / 3.0;
            }
            else if (dimension == 3)
            {
                nodalArea = geometry.Volume() * 0.25;
            }

            long double bariPosX = 0;
            long double bariPosY = 0;

            long double eleErrorL2Velocity = 0;
            long double eleErrorL2VelocityX = 0;
            long double eleErrorL2VelocityY = 0;
            long double eleErrorL2Pressure = 0;

            //ShapeFunctionDerivativesArrayType DN_DX;
            Matrix NContainer;
            // Single-point Gauss quadrature shape functions.
            NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            //this->CalculateGeometryData(DN_DX,NContainer,GaussWeights);
            const Vector &N = row(NContainer, 0);
            // itElem->EvaluateInPoint(elementalPressure,PRESSURE,N);
            const unsigned int
NumNodes = geometry.size();
            // Interpolate pressure and velocity at the Gauss point.
            double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            ;

            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }

            // Accumulate the element barycenter (assumes 3-node triangles here).
            for (unsigned int i = 0; i < geometry.size(); i++)
            {
                // index = i*dimension;
                const long double nodalPosX = geometry(i)->X();
                const long double nodalPosY = geometry(i)->Y();

                // const long double velX = geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                // const long double velY = geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
                // const long double pressure = geometry(i)->FastGetSolutionStepValue(PRESSURE);

                // long double expectedVelocityX = pow(posX,2) * (1.0-posX)*(1.0-posX) * ( 2.0*posY - 6.0*pow(posY,2) + 4.0*pow(posY,3) );
                // long double expectedVelocityY = -pow(posY,2) * (1.0-posY)*(1.0-posY) * ( 2.0*posX - 6.0*pow(posX,2) + 4.0*pow(posX,3) );
                // long double expectedPressure = -posX * (1.0-posX);

                // long double nodalErrorVelocityX = velX - expectedVelocityX;
                // long double nodalErrorVelocityY = velY - expectedVelocityY;
                // long double nodalErrorPressure = pressure - expectedPressure;

                // sumErrorL2Velocity += (pow(nodalErrorVelocityX,2) + pow(nodalErrorVelocityY,2)) * nodalArea;
                // sumErrorL2VelocityX += pow(nodalErrorVelocityX,2) * nodalArea;
                // sumErrorL2VelocityY += pow(nodalErrorVelocityY,2) * nodalArea;
                // sumErrorL2Pressure += pow(nodalErrorPressure,2) * nodalArea;

                // eleErrorL2Velocity += pow(nodalErrorVelocityX,2) + pow(nodalErrorVelocityY,2);
                // eleErrorL2VelocityX += pow(nodalErrorVelocityX,2);
                // eleErrorL2VelocityY += pow(nodalErrorVelocityY,2);
                // eleErrorL2Pressure += pow(nodalErrorPressure,2);

                bariPosX += nodalPosX / 3.0;
                bariPosY += nodalPosY / 3.0;
            }

            // Manufactured (analytical) solution evaluated at the barycenter.
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            long double expectedPressure = -posX * (1.0 - posX);

            eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
            eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
            eleErrorL2Pressure = elementalPressure - expectedPressure;

            sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
            sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
            sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();

            // sumErrorL2Velocity += eleErrorL2Velocity * geometry.Area();
            // sumErrorL2VelocityX += eleErrorL2VelocityX * geometry.Area();
            // sumErrorL2VelocityY += eleErrorL2VelocityY * geometry.Area();
            // sumErrorL2Pressure += eleErrorL2Pressure * geometry.Area();

            // Elemental deviatoric stress: currently forced to zero (see NOTE above).
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);

            long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
            long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
            long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));

            long double nodalErrorTauXX = tauXX - expectedTauXX;
            long
double nodalErrorTauYY = tauYY - expectedTauYY;
            long double nodalErrorTauXY = tauXY - expectedTauXY;

            // std::cout<<"tauXX "<<tauXX<<" expectedtauXX "<<expectedTauXX<<" nodalErrorTauXX "<<nodalErrorTauXX<<std::endl;
            // std::cout<<"tauyy "<<tauYY<<" expectedtauYY "<<expectedTauYY<<" nodalErrorTauYY "<<nodalErrorTauYY<<std::endl;
            // std::cout<<"tauXY "<<tauXY<<" expectedtauXY "<<expectedTauXY<<" nodalErrorTauXY "<<nodalErrorTauXY<<std::endl;

            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
        }
    }

    // long double errorL2Velocity = sumErrorL2Velocity;
    // long double errorL2VelocityX = sumErrorL2VelocityX;
    // long double errorL2VelocityY = sumErrorL2VelocityY;
    // long double errorL2Pressure = sumErrorL2Pressure;
    long double errorL2Velocity = sqrt(sumErrorL2Velocity);
    long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
    long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
    long double errorL2Pressure = sqrt(sumErrorL2Pressure);
    long double errorL2TauXX = sqrt(sumErrorL2TauXX);
    long double errorL2TauYY = sqrt(sumErrorL2TauYY);
    long double errorL2TauXY = sqrt(sumErrorL2TauXY);

    // Append one (time, norm) row per quantity to its diagnostics file.
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
    myfileVelocity.close();

    std::ofstream myfileVelocityX;
    myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
    myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
    myfileVelocityX.close();

    std::ofstream myfileVelocityY;
    myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
    myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
    myfileVelocityY.close();

    std::ofstream myfilePressure;
    myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
    myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
    myfilePressure.close();

    std::ofstream myfileTauXX;
myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
    myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
    myfileTauXX.close();

    std::ofstream myfileTauYY;
    myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
    myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
    myfileTauYY.close();

    std::ofstream myfileTauXY;
    myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
    myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
    myfileTauXY.close();
}

// Diagnostic only: L2 error of the tangential velocity and shear stress against the
// analytical circular Couette ("Poiseuille" here) solution with hard-coded geometry
// (r_in=0.2, R_out=0.5), angular velocity omega=0.5 and viscosity=100; appends the
// norms to errorL2Poiseuille.txt.
void ComputeErrorL2NormCasePoiseuille()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;

    double r_in = 0.2;
    double R_out = 0.5;
    double kappa = r_in / R_out;
    double omega = 0.5;
    double viscosity = 100.0;

#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            long double nodalArea = 0;

            if (dimension == 2)
            {
                nodalArea = geometry.Area() / 3.0;
            }
            else if (dimension == 3)
            {
                nodalArea = geometry.Volume() * 0.25;
            }

            long double bariPosX = 0;
            long double bariPosY = 0;

            long double eleErrorL2Velocity = 0;
            long double eleErrorL2VelocityX = 0;
            long double eleErrorL2VelocityY = 0;
            long double eleErrorL2Pressure = 0;

            //ShapeFunctionDerivativesArrayType DN_DX;
            Matrix NContainer;
            // Single-point Gauss quadrature shape functions.
            NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            //this->CalculateGeometryData(DN_DX,NContainer,GaussWeights);
            const Vector &N = row(NContainer, 0);
            // itElem->EvaluateInPoint(elementalPressure,PRESSURE,N);
            const unsigned int NumNodes = geometry.size();

            double elementalPressure = N[0] *
geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            ;

            // Interpolate pressure and velocity at the Gauss point.
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }

            // Element barycenter (assumes 3-node triangles).
            for (unsigned int i = 0; i < geometry.size(); i++)
            {
                // index = i*dimension;
                const long double nodalPosX = geometry(i)->X();
                const long double nodalPosY = geometry(i)->Y();

                bariPosX += nodalPosX / 3.0;
                bariPosY += nodalPosY / 3.0;
            }

            // Polar decomposition of the barycenter position.
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);

            // Analytical tangential velocity of the circular Couette flow.
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;

            // Elemental deviatoric stress: currently forced to zero (GetValue calls disabled).
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);

            // Analytical shear stress, and its value computed from the Cartesian tau.
            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;

            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
        }
    }
double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta); double errorL2TauTheta = sqrt(sumErrorL2TauTheta); std::ofstream myfileVelocity; myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n"; myfileVelocity.close(); } bool CheckVelocityConvergence(const double NormDv, double &errorNormDv) { ModelPart &rModelPart = BaseType::GetModelPart(); double NormV = 0.00; errorNormDv = 0; #pragma omp parallel reduction(+ \ : NormV) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY); double NormVelNode = 0; for (unsigned int d = 0; d < 3; ++d) { NormVelNode += Vel[d] * Vel[d]; NormV += Vel[d] * Vel[d]; } } } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); if (NormV == 0.0) NormV = 1.00; errorNormDv = NormDv / NormV; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << "The norm of velocity increment is: " << NormDv << std::endl; std::cout << "The norm of velocity is: " << NormV << std::endl; std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl; } /* else{ */ /* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */ /* } */ if (errorNormDv < mVelocityTolerance) { return true; } else { return false; } } bool CheckPressureConvergence(const double NormDp, double &errorNormDp) { ModelPart &rModelPart = BaseType::GetModelPart(); double NormP = 0.00; errorNormDp = 0; #pragma omp parallel reduction(+ \ : NormP) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, 
NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; errorNormDp = NormDp / NormP; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << " The norm of pressure increment is: " << NormDp << std::endl; std::cout << " The norm of pressure is: " << NormP << std::endl; std::cout << " Pressure error: " << errorNormDp << std::endl; } /* else{ */ /* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */ /* } */ if (errorNormDp < mPressureTolerance) { return true; } else return false; } bool FixTimeStepMomentum(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.005; bool fixedTimeStep = false; if (currentTime < 10 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl; minTolerance = 0.05; if (DvErrorNorm > minTolerance) { std::cout << "BAD CONVERGENCE!!! 
I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl; fixedTimeStep = true; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool CheckMomentumConvergence(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.99999; bool fixedTimeStep = false; if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! 
error: " << DvErrorNorm << " higher than 0.9999" << std::endl; std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl; fixedTimeStep = true; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool FixTimeStepContinuity(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.01; bool fixedTimeStep = false; if (currentTime < 10 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { fixedTimeStep = true; rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true); if (DvErrorNorm > 10 * minTolerance) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! 
error: " << DvErrorNorm << " higher than 0.9999" << std::endl; std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl; fixedTimeStep = true; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } } } } else { rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); } return fixedTimeStep; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxPressureIter; unsigned int mDomainSize; unsigned int mTimeOrder; bool mReformDofSet; // Fractional step index. /* 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity */ // unsigned int mStepId; /// Scheme for the solution of the momentum equation StrategyPointerType mpMomentumStrategy; /// Scheme for the solution of the mass equation StrategyPointerType mpPressureStrategy; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ virtual void InitializeStrategy(SolverSettingsType &rSolverConfig) { KRATOS_TRY; mTimeOrder = rSolverConfig.GetTimeOrder(); // Check that input parameters are reasonable and sufficient. 
this->Check(); //ModelPart& rModelPart = this->GetModelPart(); mDomainSize = rSolverConfig.GetDomainSize(); mReformDofSet = rSolverConfig.GetReformDofSet(); BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel()); // Initialize strategies for each step bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy); if (HaveVelStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance); /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */ } else { KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", ""); } bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy); if (HavePressStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter); } else { KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", ""); } // Check input parameters this->Check(); KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) {} /// Copy constructor. TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {} ///@} }; /// Class TwoStepVPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
parallel_invoker.h
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Parallel for loop execution.
// For details adapt parallel_using_* flags defined in parallel_invoker.cc.
// Usage example (for 1D):
// Define Functor or lambda function that implements:
// void operator()(const BlockedRange & range) const;
// (in addition functor needs to be copyable).
// Execute a for loop in parallel from 0 to N via:
// ParallelFor(0,           // start_index
//             num_frames,  // end_index, exclusive
//             1,           // number of elements processed per iteration
//             [](const BlockedRange& range) {
//               // Process per-thread sub-range
//               for (int i = range.begin(); i < range.end(); ++i) {
//                 // Process i'th item.
//               }
//             });
//
// Specific implementation to copy a vector of images in parallel.
// class CopyInvoker { // public: // CopyInvoker(const vector<cv::Mat>& inputs, // vector<cv::Mat*>* outputs) // : inputs_(inputs), outputs_(outputs) { // } // CopyInvoker(const CopyInvoker& rhs) // : inputs_(rhs.inputs_), outputs_(rhs.outputs) { // } // void operator()(const BlockedRange& range) { // for (int frame = range.begin(); frame < range.end(); ++frame) { // inputs_[frame].copyTo(*(*outputs_)[frame]); // } // } // private: // const vector<cv::Mat>& inputs_; // vector<cv::Mat*>* outputs_; // } // vector<cv::Mat> inputs; // vector<cv::Mat*> outputs; // ParallelFor(0, num_frames, 1, CopyInvoker(inputs, &outputs)); // // OR (with lambdas): // ParallelFor(0, num_frames, 1, // [&inputs, &outputs](const BlockedRange& range) { // for (int frame = range.begin(); frame < range.end(); ++frame) { // inputs[frame].copyTo(*(outputs)[frame]); // } // } #ifndef MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_ #define MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_ #include <stddef.h> #include <memory> #include "absl/synchronization/mutex.h" #include "mediapipe/framework/port/logging.h" #ifdef PARALLEL_INVOKER_ACTIVE #include "mediapipe/framework/port/threadpool.h" #ifdef __APPLE__ #include <dispatch/dispatch.h> #include <stdatomic.h> #endif #endif // PARALLEL_INVOKER_ACTIVE // Specifies parallelization implementation to use. enum PARALLEL_INVOKER_MODE { PARALLEL_INVOKER_NONE = 0, // Uses single threaded execution PARALLEL_INVOKER_THREAD_POOL = 1, // Uses //thread/threadpool PARALLEL_INVOKER_OPENMP = 2, // Uses OpenMP (requires compiler support) PARALLEL_INVOKER_GCD = 3, // Uses GCD (Apple) PARALLEL_INVOKER_MAX_VALUE = 4, // Increase when adding more modes }; extern int flags_parallel_invoker_mode; extern int flags_parallel_invoker_max_threads; // Note flag: Parallel processing only activated if // PARALLEL_INVOKER_ACTIVE is defined. 
namespace mediapipe {

// Partitions the range [begin, end) into equal blocks of size grain_size each
// (except last one, might be less than grain_size).
class BlockedRange {
 public:
  BlockedRange(int begin, int end, int grain_size)
      : begin_(begin), end_(end), grain_size_(grain_size) {}

  int begin() const { return begin_; }
  int end() const { return end_; }
  int grain_size() const { return grain_size_; }

 private:
  int begin_;
  int end_;
  int grain_size_;
};

// Partitions the range row_range x col_range into equal
// blocks of size row_range.grain_size() x col_range.grain_size() each
// (except last column and row might be of size less than grain_size in one
// or both of their dimensions).
class BlockedRange2D {
 public:
  BlockedRange2D(const BlockedRange& rows, const BlockedRange& cols)
      : rows_(rows), cols_(cols) {}

  const BlockedRange& rows() const { return rows_; }
  const BlockedRange& cols() const { return cols_; }

 private:
  BlockedRange rows_;
  BlockedRange cols_;
};

#ifdef PARALLEL_INVOKER_ACTIVE

// Singleton ThreadPool for parallel invoker.
ThreadPool* ParallelInvokerThreadPool();

#ifdef __APPLE__

// Enable to allow GCD as an option beside ThreadPool.
#define USE_PARALLEL_INVOKER_GCD 1

#define CHECK_GCD_PARALLEL_WORK_COUNT DEBUG

// Context handed to dispatch_apply_f: carries a copy of the invoker plus the
// full row range being partitioned. When CHECK_GCD_PARALLEL_WORK_COUNT is on,
// it also counts how many tasks fetched the invoker.
// NOTE: holds rows_ by reference -- the referenced BlockedRange must outlive
// the dispatch, which ParallelFor guarantees by blocking in dispatch_apply_f.
template <class Invoker>
class ParallelInvokerGCDContext {
 public:
  ParallelInvokerGCDContext(const Invoker& invoker, const BlockedRange& rows)
      : local_invoker_(invoker), rows_(rows) {
#if CHECK_GCD_PARALLEL_WORK_COUNT
    count_ = 0;
#endif
  }

  const Invoker& invoker() {
#if CHECK_GCD_PARALLEL_WORK_COUNT
    // Implicitly tracking the # of launched tasks at invoker retrieval.
    atomic_fetch_add(&count_, 1);
#endif
    return local_invoker_;
  }

  const BlockedRange& rows() const { return rows_; }

#if CHECK_GCD_PARALLEL_WORK_COUNT
  const int count() { return atomic_load(&count_); }
#endif

 private:
  Invoker local_invoker_;
  const BlockedRange& rows_;
#if CHECK_GCD_PARALLEL_WORK_COUNT
  _Atomic(int32_t) count_;
#endif
};

// 2D variant: adds the (copied) column range to the 1D context.
template <class Invoker>
class ParallelInvokerGCDContext2D : public ParallelInvokerGCDContext<Invoker> {
 public:
  ParallelInvokerGCDContext2D(const Invoker& invoker, const BlockedRange& rows,
                              const BlockedRange& cols)
      : ParallelInvokerGCDContext<Invoker>(invoker, rows), cols_(cols) {}

  const BlockedRange& cols() const { return cols_; }

 private:
  BlockedRange cols_;
};

// dispatch_apply_f callback for 1D loops: maps task `index` to its
// grain_size-sized sub-range (clamped at the end) and runs the invoker on it.
template <class Invoker>
static void ParallelForGCDTask(void* context, size_t index) {
  ParallelInvokerGCDContext<Invoker>* invoker_context =
      static_cast<ParallelInvokerGCDContext<Invoker>*>(context);
  const BlockedRange& all_tasks = invoker_context->rows();
  int start = all_tasks.begin() + index * all_tasks.grain_size();
  int end = std::min(all_tasks.end(), start + all_tasks.grain_size());
  BlockedRange this_task(start, end, all_tasks.grain_size());
  const Invoker& invoker = invoker_context->invoker();
  invoker(this_task);
}

// dispatch_apply_f callback for 2D loops: partitions across rows only; each
// row-block receives the full column range.
template <class Invoker>
static void ParallelForGCDTask2D(void* context, size_t index) {
  ParallelInvokerGCDContext2D<Invoker>* invoker_context =
      static_cast<ParallelInvokerGCDContext2D<Invoker>*>(context);
  // Partitioning across rows.
  const BlockedRange& all_tasks = invoker_context->rows();
  int start = all_tasks.begin() + index * all_tasks.grain_size();
  int end = std::min(all_tasks.end(), start + all_tasks.grain_size());
  BlockedRange this_task(start, end, all_tasks.grain_size());
  const Invoker& invoker = invoker_context->invoker();
  invoker(BlockedRange2D(this_task, invoker_context->cols()));
}

#endif  // __APPLE__
#endif  // PARALLEL_INVOKER_ACTIVE

// Simple wrapper for compatibility with below ParallelFor function.
template <class Invoker>
void SerialFor(size_t start, size_t end, size_t grain_size,
               const Invoker& invoker) {
  // NOTE(review): grain_size is accepted only for signature compatibility;
  // the whole [start, end) range is handed to the invoker as one block.
  invoker(BlockedRange(start, end, 1));
}

// Validates flags_parallel_invoker_mode against what the current platform
// and build flags actually support, downgrading to a supported mode (with a
// log message) where needed. CHECK-fails on out-of-range values.
inline void CheckAndSetInvokerOptions() {
#if defined(PARALLEL_INVOKER_ACTIVE)
#if defined(__ANDROID__)
  // If unsupported option is selected, force usage of OpenMP if detected, and
  // ThreadPool otherwise.
  if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE &&
      flags_parallel_invoker_mode != PARALLEL_INVOKER_THREAD_POOL &&
      flags_parallel_invoker_mode != PARALLEL_INVOKER_OPENMP) {
#if defined(_OPENMP)
    LOG(WARNING) << "Unsupported invoker mode selected on Android. "
                 << "OpenMP linkage detected, so falling back to OpenMP";
    flags_parallel_invoker_mode = PARALLEL_INVOKER_OPENMP;
#else   // _OPENMP
    // Fallback mode for active parallel invoker without OpenMP is ThreadPool.
    LOG(WARNING) << "Unsupported invoker mode selected on Android. "
                 << "Falling back to ThreadPool";
    flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
#endif  // _OPENMP
  }
#endif  // __ANDROID__

#if defined(__APPLE__) || defined(__EMSCRIPTEN__)
  // Force usage of ThreadPool if unsupported option is selected.
  // (OpenMP is not supported on iOS, due to missing clang support).
  if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE &&
#if defined(USE_PARALLEL_INVOKER_GCD)
      flags_parallel_invoker_mode != PARALLEL_INVOKER_GCD &&
#endif  // USE_PARALLEL_INVOKER_GCD
      flags_parallel_invoker_mode != PARALLEL_INVOKER_THREAD_POOL) {
    LOG(WARNING) << "Unsupported invoker mode selected on iOS. "
                 << "Falling back to ThreadPool mode";
    flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
  }
#endif  // __APPLE__ || __EMSCRIPTEN__

#if !defined(__APPLE__) && !defined(__EMSCRIPTEN__) && !defined(__ANDROID__)
  // Desktop builds: always use ThreadPool regardless of the requested mode.
  flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
#endif  // !__APPLE__ && !__EMSCRIPTEN__ && !__ANDROID__

  // If OpenMP is requested, make sure we can actually use it, and fall back
  // to ThreadPool if not.
  if (flags_parallel_invoker_mode == PARALLEL_INVOKER_OPENMP) {
#if !defined(_OPENMP)
    LOG(ERROR) << "OpenMP invoker mode selected but not compiling with OpenMP "
               << "enabled. Falling back to ThreadPool";
    flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL;
#endif  // _OPENMP
  }
#else   // PARALLEL_INVOKER_ACTIVE
  if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE) {
    LOG(ERROR) << "Parallel execution requested but PARALLEL_INVOKER_ACTIVE "
               << "compile flag is not set. Falling back to single threaded "
               << "execution.";
    flags_parallel_invoker_mode = PARALLEL_INVOKER_NONE;
  }
#endif  // PARALLEL_INVOKER_ACTIVE

  CHECK_LT(flags_parallel_invoker_mode, PARALLEL_INVOKER_MAX_VALUE)
      << "Invalid invoker mode specified.";
  CHECK_GE(flags_parallel_invoker_mode, 0)
      << "Invalid invoker mode specified.";
}

// Performs parallel iteration from [start to end), scheduling grain_size
// iterations per thread. For each iteration
// invoker(BlockedRange(thread_local_start, thread_local_end))
// is called. Each thread is given its local copy of invoker, i.e.
// invoker needs to have copy constructor defined.
template <class Invoker>
void ParallelFor(size_t start, size_t end, size_t grain_size,
                 const Invoker& invoker) {
#ifdef PARALLEL_INVOKER_ACTIVE
  CheckAndSetInvokerOptions();
  switch (flags_parallel_invoker_mode) {
#if defined(__APPLE__)
    case PARALLEL_INVOKER_GCD: {
      // Ceiling division: number of grain_size-sized blocks.
      int iterations_remain = (end - start + grain_size - 1) / grain_size;
      CHECK_GT(iterations_remain, 0);
      if (iterations_remain == 1) {
        // Execute invoker serially.
        invoker(BlockedRange(start, std::min(end, start + grain_size), 1));
      } else {
        BlockedRange all_tasks(start, end, grain_size);
        ParallelInvokerGCDContext<Invoker> context(invoker, all_tasks);
        dispatch_queue_t concurrent_queue =
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        // dispatch_apply_f blocks until all sub-tasks have completed.
        dispatch_apply_f(iterations_remain, concurrent_queue, &context,
                         ParallelForGCDTask<Invoker>);
#if CHECK_GCD_PARALLEL_WORK_COUNT
        CHECK_EQ(iterations_remain, context.count());
#endif
      }
      break;
    }
#endif  // __APPLE__
    case PARALLEL_INVOKER_THREAD_POOL: {
      int iterations_remain = (end - start + grain_size - 1) / grain_size;
      CHECK_GT(iterations_remain, 0);
      if (iterations_remain == 1) {
        // Execute invoker serially.
        invoker(BlockedRange(start, std::min(end, start + grain_size), 1));
        break;
      }
      // Shared completion state: remaining-task counter plus condvar that the
      // scheduling thread waits on.
      struct {
        absl::Mutex mutex;
        absl::CondVar completed;
        int iterations_remain GUARDED_BY(mutex);
      } loop;
      {
        absl::MutexLock lock(&loop.mutex);
        loop.iterations_remain = iterations_remain;
      }
      // NOTE(review): x is int while start/end are size_t -- mixed
      // signed/unsigned comparison; ranges beyond INT_MAX would misbehave.
      for (int x = start; x < end; x += grain_size) {
        auto loop_func = [x, end, grain_size, &loop, invoker]() {
          // Execute invoker.
          invoker(BlockedRange(x, std::min(end, x + grain_size), 1));
          // Decrement counter.
          absl::MutexLock lock(&loop.mutex);
          --loop.iterations_remain;
          if (loop.iterations_remain == 0) {
            loop.completed.SignalAll();
          }
        };
        // Attempt to run in parallel, if busy run serial to avoid deadlocking.
        // This can happen during nested invocation of ParallelFor, as if the
        // loop iteration itself is calling ParallelFor we might deadlock if
        // we can not guarantee for the iteration to be scheduled.
        ParallelInvokerThreadPool()->Schedule(loop_func);
      }
      // Wait on termination of all iterations.
      loop.mutex.Lock();
      while (loop.iterations_remain > 0) {
        loop.completed.Wait(&loop.mutex);
      }
      loop.mutex.Unlock();
      break;
    }
    case PARALLEL_INVOKER_OPENMP: {
      // Use thread-local copy of invoker.
      Invoker local_invoker(invoker);
      // NOTE(review): this path iterates one element at a time and ignores
      // grain_size, unlike the GCD/ThreadPool paths.
#pragma omp parallel for firstprivate(local_invoker) \
    num_threads(flags_parallel_invoker_max_threads)
      for (int x = start; x < end; ++x) {
        local_invoker(BlockedRange(x, x + 1, 1));
      }
      break;
    }
    case PARALLEL_INVOKER_NONE: {
      SerialFor(start, end, grain_size, invoker);
      break;
    }
    case PARALLEL_INVOKER_MAX_VALUE: {
      LOG(FATAL) << "Impossible.";
      break;
    }
  }
#else
  SerialFor(start, end, grain_size, invoker);
#endif  // PARALLEL_INVOKER_ACTIVE
}

// Simple wrapper for compatibility with below ParallelFor2D function.
template <class Invoker>
void SerialFor2D(size_t start_row, size_t end_row, size_t start_col,
                 size_t end_col, size_t grain_size, const Invoker& invoker) {
  invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1),
                         BlockedRange(start_col, end_col, 1)));
}

// Same as above ParallelFor for 2D iteration.
template <class Invoker>
void ParallelFor2D(size_t start_row, size_t end_row, size_t start_col,
                   size_t end_col, size_t grain_size, const Invoker& invoker) {
#ifdef PARALLEL_INVOKER_ACTIVE
  CheckAndSetInvokerOptions();
  switch (flags_parallel_invoker_mode) {
#if defined(__APPLE__)
    case PARALLEL_INVOKER_GCD: {
      const int iterations_remain =
          (end_row - start_row + grain_size - 1) / grain_size;
      CHECK_GT(iterations_remain, 0);
      if (iterations_remain == 1) {
        // Execute invoker serially.
        invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1),
                               BlockedRange(start_col, end_col, 1)));
      } else {
        BlockedRange all_tasks(start_row, end_row, grain_size);
        ParallelInvokerGCDContext2D<Invoker> context(
            invoker, all_tasks, BlockedRange(start_col, end_col, grain_size));
        dispatch_queue_t concurrent_queue =
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        dispatch_apply_f(iterations_remain, concurrent_queue, &context,
                         ParallelForGCDTask2D<Invoker>);
#if CHECK_GCD_PARALLEL_WORK_COUNT
        CHECK_EQ(iterations_remain, context.count());
#endif
      }
      break;
    }
#endif  // __APPLE__
    case PARALLEL_INVOKER_THREAD_POOL: {
      // NOTE(review): unlike the 1D version, rows are scheduled one at a
      // time; grain_size is not used on this path.
      int iterations_remain = end_row - start_row;  // Guarded by loop_mutex
      CHECK_GT(iterations_remain, 0);
      if (iterations_remain == 1) {
        // Execute invoker serially.
        invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1),
                               BlockedRange(start_col, end_col, 1)));
        break;
      }
      absl::Mutex loop_mutex;
      absl::CondVar loop_completed;
      for (int y = start_row; y < end_row; ++y) {
        auto loop_func = [y, start_col, end_col, &loop_mutex, &loop_completed,
                          &iterations_remain, invoker]() {
          // Execute invoker.
          invoker(BlockedRange2D(BlockedRange(y, y + 1, 1),
                                 BlockedRange(start_col, end_col, 1)));
          // Decrement counter.
          absl::MutexLock lock(&loop_mutex);
          --iterations_remain;
          if (iterations_remain == 0) {
            loop_completed.Signal();
          }
        };
        // Attempt to run in parallel, if busy run serial to avoid
        // deadlocking.
        ParallelInvokerThreadPool()->Schedule(loop_func);
      }
      // Wait on termination of all iterations.
      loop_mutex.Lock();
      while (iterations_remain > 0) {
        loop_completed.Wait(&loop_mutex);
      }
      loop_mutex.Unlock();
      break;
    }
    case PARALLEL_INVOKER_OPENMP: {
      // Use thread-local copy of invoker.
      Invoker local_invoker(invoker);
#pragma omp parallel for firstprivate(local_invoker) \
    num_threads(flags_parallel_invoker_max_threads)
      for (int y = start_row; y < end_row; ++y) {
        local_invoker(BlockedRange2D(BlockedRange(y, y + 1, 1),
                                     BlockedRange(start_col, end_col, 1)));
      }
      break;
    }
    case PARALLEL_INVOKER_NONE: {
      SerialFor2D(start_row, end_row, start_col, end_col, grain_size, invoker);
      break;
    }
    case PARALLEL_INVOKER_MAX_VALUE: {
      LOG(FATAL) << "Impossible.";
      break;
    }
  }
#else
  SerialFor2D(start_row, end_row, start_col, end_col, grain_size, invoker);
#endif  // PARALLEL_INVOKER_ACTIVE
}

}  // namespace mediapipe

#endif  // MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_
gbdt.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_

#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>

#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

#include "score_updater.hpp"

namespace LightGBM {

using json11::Json;

/*!
 * \brief GBDT algorithm implementation. including Training, prediction, bagging.
 */
class GBDT : public GBDTBase {
 public:
  /*!
   * \brief Constructor
   */
  GBDT();

  /*!
   * \brief Destructor
   */
  ~GBDT();

  /*!
   * \brief Initialization logic
   * \param gbdt_config Config for boosting
   * \param train_data Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void Init(const Config* gbdt_config, const Dataset* train_data,
            const ObjectiveFunction* objective_function,
            const std::vector<const Metric*>& training_metrics) override;

  /*!
   * \brief Merge model from other boosting object. Will insert to the front of current boosting object
   * \param other
   */
  void MergeFrom(const Boosting* other) override {
    // NOTE(review): assumes `other` is actually a GBDT; reinterpret_cast
    // performs no checking (a static_cast/dynamic_cast would be safer here).
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  // Shuffles the order of the iterations in [start_iter, end_iter) with a
  // fixed-seed (deterministic) Fisher-Yates pass; the trees belonging to one
  // iteration are kept together. end_iter <= 0 means "up to the last one".
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    Random tmp_rand(17);  // fixed seed -> reproducible shuffle
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    // Rebuild models_ as deep copies in the shuffled iteration order.
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
   * \brief Reset the training data
   * \param train_data New Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
                         const std::vector<const Metric*>& training_metrics) override;

  /*!
   * \brief Reset Boosting Config
   * \param gbdt_config Config for boosting
   */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
   * \brief Adding a validation dataset
   * \param valid_data Validation dataset
   * \param valid_metrics Metrics for validation dataset
   */
  void AddValidDataset(const Dataset* valid_data,
                       const std::vector<const Metric*>& valid_metrics) override;

  /*!
   * \brief Perform a full training procedure
   * \param snapshot_freq frequency of snapshot
   * \param model_output_path path of model file
   */
  void Train(int snapshot_freq, const std::string& model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;

  /*!
   * \brief Training logic
   * \param gradients nullptr for using default objective, otherwise use self-defined boosting
   * \param hessians nullptr for using default objective, otherwise use self-defined boosting
   * \return True if cannot train any more
   */
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;

  /*!
   * \brief Rollback one iteration
   */
  void RollbackOneIter() override;

  /*!
   * \brief Get current iteration
   */
  int GetCurrentIteration() const override {
    return static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
   * \brief Can use early stopping for prediction or not
   * \return True if cannot use early stopping for prediction
   */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      // No objective attached (e.g. model loaded for prediction only):
      // be conservative and require accurate prediction.
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
   * \brief Get evaluation result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return evaluation result
   */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
   * \brief Get current training score
   * \param out_len length of returned score
   * \return training score
   */
  const double* GetTrainingScore(int64_t* out_len) override;

  /*!
   * \brief Get size of prediction at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return The size of prediction
   */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
   * \brief Get prediction result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \param result used to store prediction result, should allocate memory before call this function
   * \param out_len length of returned score
   */
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;

  /*!
   * \brief Get number of prediction for one data
   * \param start_iteration Start index of the iteration to predict
   * \param num_iteration number of used iterations
   * \param is_pred_leaf True if predicting leaf index
   * \param is_pred_contrib True if predicting feature contribution
   * \return number of prediction
   */
  inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf,
                              bool is_pred_contrib) const override {
    int num_pred_in_one_row = num_class_;
    if (is_pred_leaf) {
      // One leaf index per used iteration (clamped to the trained range).
      int max_iteration = GetCurrentIteration();
      start_iteration = std::max(start_iteration, 0);
      start_iteration = std::min(start_iteration, max_iteration);
      if (num_iteration > 0) {
        num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
      } else {
        num_pred_in_one_row *= (max_iteration - start_iteration);
      }
    } else if (is_pred_contrib) {
      num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_pred_in_one_row;
  }

  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop,
                  int inner_thread_num) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop,
               int inner_thread_num) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  void PredictContrib(const double* features, double* output) const override;

  void PredictContribByMap(const std::unordered_map<int, double>& features,
                           std::vector<std::unordered_map<int, double>>* output) const override;

  /*!
* \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Calculate upper bound value * \return upper bound value */ double GetUpperBoundValue() const override; /*! * \brief Calculate lower bound value * \return lower bound value */ double GetLowerBoundValue() const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! 
* \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, num_iteration_for_pred_); if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration); } else { num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration; } start_iteration_for_pred_ = start_iteration; if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } bool IsLinear() const override { return linear_tree_; } protected: virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) { if (objective_function != nullptr) { return objective_function->IsConstantHessian(); } else { return false; } } /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! 
* \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current iteration * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! 
\brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; #ifdef USE_CUDA /*! \brief First order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> hessians_; #else /*! \brief First order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_; #endif /*! \brief Store the indices of in-bag data */ std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Start iteration of used model */ int start_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! 
\brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; const int bagging_rand_block_ = 1024; std::vector<Random> bagging_rands_; ParallelPartitionRunner<data_size_t, false> bagging_runner_; Json forced_splits_json_; bool linear_tree_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
% % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... 
** gamma = Sa+Da-Sa*Da;
** gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
**
** The above SVG definitions also define that Mathematical Composition
** methods should use a 'Over' blending mode for Alpha Channel.
** It however was not applied for composition modes of 'Plus', 'Minus',
** the modulus versions of 'Add' and 'Subtract'.
**
**
** Mathematical operator changes to be applied from IM v6.7...
**
** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
** 'ModulusAdd' and 'ModulusSubtract' for clarity.
**
** 2/ All mathematical compositions work as per the SVG specification
** with regard to blending.  This now includes 'ModulusAdd' and
** 'ModulusSubtract'.
**
** 3/ When the special channel flag 'sync' (synchronize channel updates)
** is turned off (enabled by default) then mathematical compositions are
** only performed on the channels specified, and are applied
** independently of each other.  In other words the mathematics is
** performed as 'pure' mathematical operations, rather than as image
** operations.
*/

/*
  Atop: per-channel "source atop destination" blend.  Da drops out of the
  formula because it cancels against 1/gamma in the caller, hence the
  unused parameter.
*/
static inline MagickRealType Atop(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,
  const MagickRealType magick_unused(Da))
{
  magick_unreferenced(Da);

  return(p*Sa+q*(1.0-Sa));  /* Da optimized out, Da/gamma => 1.0 */
}

/*
  CompositeAtop: source atop destination; the result keeps the
  destination's alpha (composite->opacity=q->opacity below).
*/
static inline void CompositeAtop(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  composite->opacity=q->opacity;   /* optimized  Da = 1.0-Gamma */
  composite->red=Atop(p->red,Sa,q->red,1.0);
  composite->green=Atop(p->green,Sa,q->green,1.0);
  composite->blue=Atop(p->blue,Sa,q->blue,1.0);
  if (q->colorspace == CMYKColorspace)
    composite->index=Atop(p->index,Sa,q->index,1.0);
}

/*
  What is this Composition method for? Can't find any specification!
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen).
*/
static inline void CompositeBumpmap(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    intensity;

  /* Scale every destination channel by the source pixel's intensity. */
  intensity=MagickPixelIntensity(p);
  composite->red=QuantumScale*intensity*q->red;
  composite->green=QuantumScale*intensity*q->green;
  composite->blue=QuantumScale*intensity*q->blue;
  composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity;
  if (q->colorspace == CMYKColorspace)
    composite->index=QuantumScale*intensity*q->index;
}

/*
  CompositeClear: set the composite pixel to fully transparent black.
*/
static inline void CompositeClear(const MagickPixelPacket *q,
  MagickPixelPacket *composite)
{
  composite->opacity=(MagickRealType) TransparentOpacity;
  composite->red=0.0;
  composite->green=0.0;
  composite->blue=0.0;
  if (q->colorspace == CMYKColorspace)
    composite->index=0.0;
}

/*
  ColorBurn: 'color-burn' blend on premultiplied channel values
  (Sca=Sc*Sa, Dca=Dc*Da -- see the SVG notes above), including the
  over-blend alpha terms Sca*(1-Da)+Dca*(1-Sa).
*/
static MagickRealType ColorBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  double
    SaSca;

  if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon))
    return(Sa*Da+Dca*(1.0-Sa));
  if (Sca < MagickEpsilon)
    return(Dca*(1.0-Sa));
  SaSca=Sa*PerceptibleReciprocal(Sca);
  return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeColorBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a vanishing gamma */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
  composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  ColorDodge: 'color-dodge' blend on premultiplied channel values.  The
  two '#if 0' variants below are retained historical alternatives; only
  the Oct 2004 form is active.
*/
static MagickRealType ColorDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Oct 2004 SVG specification.
  */
  if ((Sca*Da+Dca*Sa) >= Sa*Da)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#if 0
  /*
    New specification, March 2009 SVG specification.  This specification
    was also wrong for non-overlap cases.
  */
  if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da));
  if (fabs(Sca-Sa) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca)));
#endif
#if 0
  /*
    Working from first principles using the original formula:

      f(Sc,Dc) = Dc/(1-Sc)

    This works correctly!  Looks like the 2004 model was right but just
    required an extra condition for correct handling.
  */
  if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (fabs(Sca-Sa) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}

static inline void CompositeColorDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a vanishing gamma */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
  composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Darken: pick the smaller channel value, alpha-blending via the usual
  'over' formula (order of the MagickOver_ arguments selects which pixel
  wins).
*/
static inline MagickRealType Darken(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (p < q)
    return(MagickOver_(p,alpha,q,beta));  /* src-over */
  return(MagickOver_(q,beta,p,alpha));    /* dst-over */
}

static inline void CompositeDarken(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Darken is equivalent to a 'Minimum' method
      OR a greyscale version of a binary 'Or'
      OR the 'Intersection' of pixel sets.
  */
  double
    gamma;

  if ( (channel & SyncChannels) != 0 ) {
    composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
    gamma=1.0-QuantumScale*composite->opacity;
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity);
    composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity);
    composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity);
  }
  else { /* handle channels as separate grayscale channels */
    /* opacity is inverted alpha, so the darker alpha is the LARGER opacity */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=MagickMax(p->opacity,q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=MagickMin(p->red,q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=MagickMin(p->green,q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=MagickMin(p->blue,q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=MagickMin(p->index,q->index);
  }
}

static
inline void CompositeDarkenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.
    If 'Sync' flag select whole pixel based on alpha weighted intensity.
    Otherwise use intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;
    Da=1.0-QuantumScale*q->opacity;
    /* copy whichever whole pixel has the lower alpha-weighted intensity */
    *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q))
      ? *p : *q;
  }
  else {
    /* unweighted intensity comparison decides the donor pixel */
    int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q));
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity = from_p ? p->opacity : q->opacity;
    if ( (channel & RedChannel) != 0 )
      composite->red = from_p ? p->red : q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green = from_p ? p->green : q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue = from_p ? p->blue : q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index = from_p ? p->index : q->index;
  }
}

/*
  Difference: absolute difference style blend on alpha-weighted values.
*/
static inline MagickRealType Difference(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  /*
    Optimized by multiplying by QuantumRange (taken from gamma).
  */
  return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q));
}

static inline void CompositeDifference(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    /*
      Values are not normalized as an optimization.
*/
    composite->red=gamma*Difference(p->red,Sa,q->red,Da);
    composite->green=gamma*Difference(p->green,Sa,q->green,Da);
    composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Difference(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity));
    if ( (channel & RedChannel) != 0 )
      composite->red=fabs((double) (p->red-q->red));
    if ( (channel & GreenChannel) != 0 )
      composite->green=fabs((double) (p->green-q->green));
    if ( (channel & BlueChannel) != 0 )
      composite->blue=fabs((double) (p->blue-q->blue));
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=fabs((double) (p->index-q->index));
  }
}

static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Divide Source by Destination

      f(Sc,Dc) = Sc / Dc

    But with appropriate handling for the special case of Dc == 0,
    specifically so that f(Black,Black)=Black and f(non-Black,Black)=White.
    It is however also important to correctly do 'over' alpha blending,
    which is why the formula becomes so complex.
  */
  if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (fabs(Dca) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sca*Da*Da*PerceptibleReciprocal(Dca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeDivide(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* guard against division by a vanishing gamma */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
    composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    /* alphas of 1.0 make Divide() act on the raw (normalized) channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
  }
}

/*
  Exclusion: Sca*Da+Dca*Sa-2*Sca*Dca plus the over-blend alpha terms.
*/
static MagickRealType Exclusion(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeExclusion(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    gamma,
    Sa,
    Da;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* guard against division by a vanishing gamma */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
    composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    /* alphas of 1.0 make Exclusion() act on the raw (normalized) channels */
    if ((channel & AlphaChannel) != 0)
      composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0));
    if ((channel & RedChannel) != 0)
      composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0,
        QuantumScale*q->red,1.0);
    if ((channel & GreenChannel) != 0)
      composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0,
        QuantumScale*q->green,1.0);
    if ((channel & BlueChannel) != 0)
      composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0,
        QuantumScale*q->blue,1.0);
    if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
      composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0,
        QuantumScale*q->index,1.0);
  }
}

/*
  HardLight: multiply for dark source channels (2*Sca < Sa), screen
  otherwise, each with the over-blend alpha terms.
*/
static MagickRealType HardLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  if ((2.0*Sca) < Sa)
    return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeHardLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a vanishing gamma */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
  composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  HardMix: threshold the sum of the two channel values to 0.0 or 1.0.
  NOTE(review): unlike the other blend helpers, the caller passes
  UN-normalized alpha-weighted quantum values (p->red*Sa, q->red*Da), so
  the QuantumRange comparison operates on quantum scale and the 0/1
  result is then scaled back up by gamma (which is on the order of
  QuantumRange) -- confirm against the upstream ImageMagick source.
*/
static MagickRealType HardMix(const MagickRealType Sca,
  const MagickRealType Dca)
{
  if ((Sca+Dca) < QuantumRange)
    return(0.0);
  else
    return(1.0);
}

static inline void CompositeHardMix(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a vanishing gamma */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*HardMix(p->red*Sa,q->red*Da);
  composite->green=gamma*HardMix(p->green*Sa,q->green*Da);
  composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*HardMix(p->index*Sa,q->index*Da);
}

/*
  HCLComposite: convert one HCL (hue/chroma/luma) pixel back to RGB.
*/
static void HCLComposite(const double hue,const double chroma,const double luma,
  MagickRealType *red,MagickRealType *green,MagickRealType *blue)
{
  double
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
*/
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;  /* hue sector in [0,6) */
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r=x;
      g=c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g=c;
      b=x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g=x;
      b=c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r=x;
      b=c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r=c;
      b=x;
    }
  /* add back the achromatic (luma) component using Rec. 601 weights */
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

/*
  CompositeHCL: convert RGB quantum values to hue/chroma/luma.
*/
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,double *hue,double *chroma,double *luma)
{
  double
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (double *) NULL);
  assert(chroma != (double *) NULL);
  assert(luma != (double *) NULL);
  r=(double) red;
  g=(double) green;
  b=(double) blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(double) MagickMin(r,MagickMin(g,b));
  h=0.0;
  if (c == 0)
    h=0.0;
  else if (red == (MagickRealType) max)
    h=fmod((g-b)/c+6.0,6.0);
  else if (green == (MagickRealType) max)
    h=((b-r)/c)+2.0;
  else if (blue == (MagickRealType) max)
    h=((r-g)/c)+4.0;
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}

/*
  In: keep the part of the source that overlaps the destination.
*/
static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa,
  const MagickRealType magick_unused(q),const MagickRealType Da)
{
  magick_unreferenced(q);

  return(Sa*p*Da);
}

static inline void CompositeIn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Sa,
    Da;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*Da;
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*In(p->red,Sa,q->red,Da);
  composite->green=gamma*In(p->green,Sa,q->green,Da);
  composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*In(p->index,Sa,q->index,Da);
}

/*
  Lighten: select whichever pixel is lighter, alpha-blended 'over'.
*/
static inline MagickRealType Lighten(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (p > q)
    return(MagickOver_(p,alpha,q,beta));  /* src-over */
  return(MagickOver_(q,beta,p,alpha));    /* dst-over */
}

static inline void CompositeLighten(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Lighten is also equivalent to a 'Maximum' method
    OR a greyscale version of a binary 'And'
    OR the 'Union' of pixel sets.
  */
  double
    gamma;

  if ( (channel & SyncChannels) != 0 ) {
    composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
    gamma=1.0-QuantumScale*composite->opacity;
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
    composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
    composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=MagickMin(p->opacity,q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=MagickMax(p->red,q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=MagickMax(p->green,q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=MagickMax(p->blue,q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=MagickMax(p->index,q->index);
  }
}

static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.
    If 'Sync' flag select whole pixel based on alpha weighted intensity.
Otherwise use Intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;
    Da=1.0-QuantumScale*q->opacity;
    /* copy whichever whole pixel has the greater alpha-weighted intensity */
    *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q))
                 ? *p : *q;
  }
  else {
    /* per-channel copy from the brighter of the two pixels */
    int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity = from_p ? p->opacity : q->opacity;
    if ( (channel & RedChannel) != 0 )
      composite->red = from_p ? p->red : q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green = from_p ? p->green : q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue = from_p ? p->blue : q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index = from_p ? p->index : q->index;
  }
}

#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula
    f(Sc,Dc) = Sc + Dc
    Dca' = Sca + Dca
  */
  return(Sca+Dca);
}
#endif

static inline void CompositeLinearDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*(p->red*Sa+q->red*Da);
  composite->green=gamma*(p->green*Sa+q->green*Da);
  composite->blue=gamma*(p->blue*Sa+q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*(p->index*Sa+q->index*Da);
}

static inline MagickRealType LinearBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Sc + Dc - 1
  */
  return(Sca+Dca-Sa*Da);
}

static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Dc + 2*Sc - 1
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}

static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...

      f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as a
    command separated 'geometry' string in "compose:args" image artifact.

      A = a->rho,  B = a->sigma,  C = a->xi,  D = a->psi

    Applying the SVG transparency formula (see above), we get...

      Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

      Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
        Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da,args);
    composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,
      QuantumScale*q->green*Da,Da,args);
    composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da,args);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da,args);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
  }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    /*
      NOTE: "Plus" does not use 'over' alpha-blending but uses a special
      'plus' form of alpha-blending. It is the ONLY mathematical operator to
      do this. This is what makes it different to the otherwise equivalent
      "LinearDodge" composition method.

      Note however that color channels are still affected by the alpha
      channel as a result of the blending, making it just as useless for
      independent channel maths, just like all other mathematical composition
      methods.

      As such the removal of the 'sync' flag, is still a useful convention.
The MagickPixelCompositePlus() function is defined in
      "composite-private.h" so it can also be used for Image Blending.
    */
    MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=p->opacity+q->opacity-QuantumRange;
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red+q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green+q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue+q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index+q->index;
  }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

      f(Sc,Dc) = Sc - Dc
  */
  magick_unreferenced(Da);

  return(Sca+Dca-2*Dca*Sa);
}

static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
    composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
    composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-(Sa-Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red-q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green-q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue-q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index-q->index;
  }
}

/*
  ModulusAdd: add the channels, wrapping the sum back into the quantum range.
*/
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p+q;
  while (pixel > QuantumRange)
    pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Sa,
      Da;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusAdd(p->red,Sa,q->red,Da);
    composite->green=ModulusAdd(p->green,Sa,q->green,Da);
    composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
  }
}

/*
  ModulusSubtract: subtract the channels, wrapping back into the quantum
  range.
*/
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p-q;
  while (pixel > QuantumRange)
pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma = RoundToUnity(Sa+Da-Sa*Da);
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
    composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
    composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
  }
}

/*
  Multiply: SVG 'multiply' blend mode in premultiplied-alpha form.
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Sa*Da);
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumScale*p->red*q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumScale*p->green*q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumScale*p->blue*q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumScale*p->index*q->index;
  }
}

/*
  Out: keep only the part of the source outside the destination.
*/
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  magick_unreferenced(q);

  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,const
MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the Softlight
    function, producing very similar results.

      f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);
  return(Dca*Dca*(Sa-2.0*Sca)*PerceptibleReciprocal(Da)+Sca*(2.0*Dca+1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ?
2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen: A negated multiply

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    Sa*=(MagickRealType) QuantumScale;
    Da*=(MagickRealType) QuantumScale; /* optimization */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
    composite->red=gamma*Screen(p->red*Sa,q->red*Da);
    composite->green=gamma*Screen(p->green*Sa,q->green*Da);
    composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Screen(p->index*Sa,q->index*Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*Screen(QuantumScale*p->red,
        QuantumScale*q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*Screen(QuantumScale*p->green,
        QuantumScale*q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
        QuantumScale*q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*Screen(QuantumScale*p->index,
        QuantumScale*q->index);
  }
}

/*
  SoftLight: W3C compositing soft-light blend, premultiplied-alpha form.
*/
static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  MagickRealType
    alpha,
    beta;

  alpha=Dca*PerceptibleReciprocal(Da);
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+
    Dca*(1.0-Sa);
  return(beta);
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated

  Multiply difference by amount, if difference larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ?
1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)*PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+
      Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
    (1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

/*
  Xor: keep only the non-overlapping parts of source and destination.
*/
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  CompositeImage: composite source_image over image at the given offset
  using the default channel set; thin wrapper over CompositeImageChannel().
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); (void) SetImageColorspace(source_image,image->colorspace); GetMagickPixelPacket(image,&zero); canvas_image=(Image *) NULL; amount=0.5; canvas_dissolve=1.0; clip_to_self=MagickTrue; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* Modify canvas outside the overlaid region. 
*/ clip_to_self=MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *source_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) CopyMagickMemory(q,p,source_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *) NULL)) (void) CopyMagickMemory(indexes,source_indexes, source_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); clip_to_self=MagickFalse; break; } case BlurCompositeOp: { CacheView *canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. 
*/ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter,1.0); /* do the variable blurring of each pixel in image */ pixel=zero; source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict r; register IndexPacket *magick_restrict canvas_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(p), blur.y1*QuantumScale*GetPixelGreen(p), blur.x2*QuantumScale*GetPixelRed(p), blur.y2*QuantumScale*GetPixelGreen(p)); (void) ResamplePixelColor(resample_filter,(double) x_offset+x,(double) y_offset+y,&pixel); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } 
resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *magick_restrict canvas_indexes; register PixelPacket *magick_restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=((MagickRealType) image->columns-1)/2.0; else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=((MagickRealType) image->rows-1)/2.0; else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. */ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. 
*/ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0 ) { canvas_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. 
This Composition method is deprecated */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsMagickTrue(value); /* Composite image. */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateCompositeImage(image,channel,compose,source_image, x_offset,y_offset,canvas_dissolve,source_dissolve,exception); if (status != MagickFalse) return(status); #endif status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(source_image,&zero); source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); source_indexes=GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image,&source); GetMagickPixelPacket(image,&canvas); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } canvas.red=(MagickRealType) GetPixelRed(q); canvas.green=(MagickRealType) GetPixelGreen(q); canvas.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { canvas.red=(MagickRealType) QuantumRange-canvas.red; canvas.green=(MagickRealType) QuantumRange-canvas.green; canvas.blue=(MagickRealType) QuantumRange-canvas.blue; canvas.index=(MagickRealType) QuantumRange-canvas.index; } /* Handle canvas modifications outside overlaid region. 
*/ composite=canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve* (QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(source_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto canvas. 
*/ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(source_indexes+ x-x_offset); if (source_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&canvas, canvas.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas,canvas.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&canvas,&composite); break; } case DstInCompositeOp: { CompositeIn(&canvas,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&canvas,&composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&canvas,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&canvas,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&canvas,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&canvas,channel,&composite); break; } case MinusSrcCompositeOp: { 
CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case StereoCompositeOp: { canvas.red=(MagickRealType) GetPixelRed(p); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == 
TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case 
CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=QuantumRange-source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
% */
/*
  TextureImage() repeatedly tiles `texture' across and down `image'.

  Two strategies are used:

    1. General path: when the compose operator is anything other than a
       plain copy (or an Over that could be reduced to a copy because
       neither image has an alpha channel), each tile is blended in with
       CompositeImage() so the configured compose method is honored.

    2. Optimized path: for CopyCompositeOp (or an alpha-free Over, which
       is equivalent), rows of texture pixels are memcpy'd directly into
       the canvas, avoiding per-tile compositing overhead.

  Returns MagickTrue on success, MagickFalse on failure (bad arguments,
  allocation/clone failure, cache faults, or a cancelled progress monitor).
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The canvas must be DirectClass so we can write raw pixels into it.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture: we change its colorspace and
    virtual-pixel method below and must not mutate the caller's image.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  /*
    TileVirtualPixelMethod makes out-of-bounds reads wrap around, so a
    single virtual-pixel fetch can span tile seams.
  */
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background: general path, one
        CompositeImage() call per tile so any compose method works.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* tile_offset shifts the tiling origin (e.g. "-texture" art). */
          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Report 100% completion regardless of where the loop stopped. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): straight row
    copies, parallelized per canvas row.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,texture_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one full texture row, wrapped via the modulo so the vertical
      tiling (and tile_offset.y shift) repeats cleanly.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      /*
        Copy one tile-width of pixels; the final tile in the row is
        clamped so we never write past the canvas edge.  `p' stays at the
        row start since every horizontal repeat uses the same texture row.
      */
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      (void) CopyMagickMemory(q,p,width*sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          /* CMYK stores the black channel in the index queue. */
          (void) CopyMagickMemory(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize monitor callbacks: they are not thread-safe. */
        #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}